From 839adbdad02464d7e9ba91f7e059747aa186a7e1 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 30 Jan 2020 13:58:15 -0500 Subject: [PATCH 01/20] Upgrade gogo/protobuf to 1.3.0 Signed-off-by: Cyril Tovena --- build-image/Dockerfile | 4 +- pkg/chunk/storage/caching_index_client.pb.go | 128 ++- pkg/distributor/ha_tracker.pb.go | 46 +- pkg/ingester/client/cortex.pb.go | 1087 +++++++++++------- pkg/ingester/wal.pb.go | 255 ++-- pkg/querier/frontend/frontend.pb.go | 84 +- pkg/querier/queryrange/queryrange.pb.go | 440 ++++--- pkg/ring/ring.pb.go | 120 +- pkg/ruler/rules/rules.pb.go | 449 ++++---- 9 files changed, 1537 insertions(+), 1076 deletions(-) diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 0ce1f07e82..add7a6fbd8 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -16,8 +16,8 @@ RUN curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/in RUN GO111MODULE=on go get -tags netgo \ github.com/client9/misspell/cmd/misspell@v0.3.4 \ github.com/golang/protobuf/protoc-gen-go@v1.3.1 \ - github.com/gogo/protobuf/protoc-gen-gogoslick@v1.2.1 \ - github.com/gogo/protobuf/gogoproto@v1.2.1 && \ + github.com/gogo/protobuf/protoc-gen-gogoslick@v1.3.0 \ + github.com/gogo/protobuf/gogoproto@v1.3.0 && \ rm -rf /go/pkg /go/src ENV KUBEVAL_VERSION=0.14.0 diff --git a/pkg/chunk/storage/caching_index_client.pb.go b/pkg/chunk/storage/caching_index_client.pb.go index 6f60ba6801..c90cffbbc3 100644 --- a/pkg/chunk/storage/caching_index_client.pb.go +++ b/pkg/chunk/storage/caching_index_client.pb.go @@ -9,6 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" ) @@ -22,7 +23,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Entry struct { Column Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=Bytes" json:"Column"` @@ -42,7 +43,7 @@ func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Entry.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -84,7 +85,7 @@ func (m *ReadBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadBatch.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -268,7 +269,7 @@ func valueToGoStringCachingIndexClient(v interface{}, typ string) string { func (m *Entry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -276,33 +277,42 @@ func (m *Entry) Marshal() (dAtA []byte, err error) { } func (m *Entry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Column.Size())) - n1, err := m.Column.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCachingIndexClient(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Value.Size())) - n2, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size 
:= m.Column.Size() + i -= size + if _, err := m.Column.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCachingIndexClient(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReadBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -310,49 +320,59 @@ func (m *ReadBatch) Marshal() (dAtA []byte, err error) { } func (m *ReadBatch) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Entries) > 0 { - for _, msg := range m.Entries { - dAtA[i] = 0xa - i++ - i = encodeVarintCachingIndexClient(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCachingIndexClient(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) + if m.Cardinality != 0 { + i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x20 } if m.Expiry != 0 { - dAtA[i] = 0x18 - i++ i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Expiry)) + i-- + dAtA[i] = 0x18 } - if m.Cardinality != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Cardinality)) + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintCachingIndexClient(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCachingIndexClient(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - return i, 
nil + return len(dAtA) - i, nil } func encodeVarintCachingIndexClient(dAtA []byte, offset int, v uint64) int { + offset -= sovCachingIndexClient(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Entry) Size() (n int) { if m == nil { @@ -393,14 +413,7 @@ func (m *ReadBatch) Size() (n int) { } func sovCachingIndexClient(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozCachingIndexClient(x uint64) (n int) { return sovCachingIndexClient(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -420,8 +433,13 @@ func (this *ReadBatch) String() string { if this == nil { return "nil" } + repeatedStringForEntries := "[]Entry{" + for _, f := range this.Entries { + repeatedStringForEntries += strings.Replace(strings.Replace(f.String(), "Entry", "Entry", 1), `&`, ``, 1) + "," + } + repeatedStringForEntries += "}" s := strings.Join([]string{`&ReadBatch{`, - `Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "Entry", "Entry", 1), `&`, ``, 1) + `,`, + `Entries:` + repeatedStringForEntries + `,`, `Key:` + fmt.Sprintf("%v", this.Key) + `,`, `Expiry:` + fmt.Sprintf("%v", this.Expiry) + `,`, `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, diff --git a/pkg/distributor/ha_tracker.pb.go b/pkg/distributor/ha_tracker.pb.go index aa241c2c53..ea52dd2f90 100644 --- a/pkg/distributor/ha_tracker.pb.go +++ b/pkg/distributor/ha_tracker.pb.go @@ -9,6 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" ) @@ -22,7 +23,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type ReplicaDesc struct { Replica string `protobuf:"bytes,1,opt,name=replica,proto3" json:"replica,omitempty"` @@ -42,7 +43,7 @@ func (m *ReplicaDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_ReplicaDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -147,7 +148,7 @@ func valueToGoStringHaTracker(v interface{}, typ string) string { func (m *ReplicaDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -155,32 +156,40 @@ func (m *ReplicaDesc) Marshal() (dAtA []byte, err error) { } func (m *ReplicaDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Replica) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintHaTracker(dAtA, i, uint64(len(m.Replica))) - i += copy(dAtA[i:], m.Replica) - } if m.ReceivedAt != 0 { - dAtA[i] = 0x10 - i++ i = encodeVarintHaTracker(dAtA, i, uint64(m.ReceivedAt)) + i-- + dAtA[i] = 0x10 + } + if len(m.Replica) > 0 { + i -= len(m.Replica) + copy(dAtA[i:], m.Replica) + i = encodeVarintHaTracker(dAtA, i, uint64(len(m.Replica))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintHaTracker(dAtA []byte, offset int, v uint64) int { + offset -= sovHaTracker(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ReplicaDesc) Size() (n int) { if m == nil { @@ -199,14 +208,7 @@ func (m 
*ReplicaDesc) Size() (n int) { } func sovHaTracker(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozHaTracker(x uint64) (n int) { return sovHaTracker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) diff --git a/pkg/ingester/client/cortex.pb.go b/pkg/ingester/client/cortex.pb.go index c97908a367..4bd6772008 100644 --- a/pkg/ingester/client/cortex.pb.go +++ b/pkg/ingester/client/cortex.pb.go @@ -11,8 +11,11 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" io "io" math "math" + math_bits "math/bits" reflect "reflect" strconv "strconv" strings "strings" @@ -27,7 +30,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type MatchType int32 @@ -95,7 +98,7 @@ func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -137,7 +140,7 @@ func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -173,7 +176,7 @@ func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := 
m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -216,7 +219,7 @@ func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -261,7 +264,7 @@ func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -318,7 +321,7 @@ func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -362,7 +365,7 @@ func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_QueryStreamResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -405,7 +408,7 @@ func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -448,7 +451,7 @@ func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -490,7 +493,7 @@ func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) } else { b = 
b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -526,7 +529,7 @@ func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -568,7 +571,7 @@ func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_UserStatsRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -607,7 +610,7 @@ func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_UserStatsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -672,7 +675,7 @@ func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_UserIDStatsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -722,7 +725,7 @@ func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_UsersStatsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -767,7 +770,7 @@ func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic boo return xxx_messageInfo_MetricsForLabelMatchersRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -824,7 +827,7 @@ func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bo return 
xxx_messageInfo_MetricsForLabelMatchersResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -870,7 +873,7 @@ func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -930,7 +933,7 @@ func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -993,7 +996,7 @@ func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]by return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1031,7 +1034,7 @@ func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1075,7 +1078,7 @@ func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1126,7 +1129,7 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Sample.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1176,7 +1179,7 @@ func (m *LabelMatchers) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1219,7 +1222,7 @@ func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metric.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1257,7 +1260,7 @@ func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1317,7 +1320,7 @@ func (m *TimeSeriesFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -1380,7 +1383,7 @@ func (m *TransferTSDBResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte return xxx_messageInfo_TransferTSDBResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -2903,6 +2906,41 @@ type IngesterServer interface { TransferTSDB(Ingester_TransferTSDBServer) error } +// UnimplementedIngesterServer can be embedded to have forward compatible implementations. 
+type UnimplementedIngesterServer struct { +} + +func (*UnimplementedIngesterServer) Push(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") +} +func (*UnimplementedIngesterServer) Query(ctx context.Context, req *QueryRequest) (*QueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { + return status.Errorf(codes.Unimplemented, "method QueryStream not implemented") +} +func (*UnimplementedIngesterServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") +} +func (*UnimplementedIngesterServer) LabelNames(ctx context.Context, req *LabelNamesRequest) (*LabelNamesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") +} +func (*UnimplementedIngesterServer) UserStats(ctx context.Context, req *UserStatsRequest) (*UserStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserStats not implemented") +} +func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserStatsRequest) (*UsersStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllUserStats not implemented") +} +func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") +} +func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { + return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") +} +func (*UnimplementedIngesterServer) TransferTSDB(srv 
Ingester_TransferTSDBServer) error { + return status.Errorf(codes.Unimplemented, "method TransferTSDB not implemented") +} + func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { s.RegisterService(&_Ingester_serviceDesc, srv) } @@ -3162,7 +3200,7 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{ func (m *WriteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3170,34 +3208,41 @@ func (m *WriteRequest) Marshal() (dAtA []byte, err error) { } func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Source != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x10 + } if len(m.Timeseries) > 0 { - for _, msg := range m.Timeseries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Timeseries[iNdEx].Size() + i -= size + if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - if m.Source != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Source)) - } - return i, nil + return len(dAtA) - i, nil } func (m *WriteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3205,17 +3250,22 @@ func (m *WriteResponse) Marshal() (dAtA []byte, err error) { } func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *ReadRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3223,29 +3273,36 @@ func (m *ReadRequest) Marshal() (dAtA []byte, err error) { } func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Queries) > 0 { - for _, msg := range m.Queries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ReadResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3253,29 +3310,36 @@ func (m *ReadResponse) Marshal() (dAtA []byte, err error) { } func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Results) > 0 { - for _, msg := range m.Results { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *QueryRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3283,39 +3347,46 @@ func (m *QueryRequest) Marshal() (dAtA []byte, err error) { } func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.StartTimestampMs != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } if m.EndTimestampMs != 0 { - dAtA[i] = 0x10 - i++ i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - if len(m.Matchers) > 0 { - for _, msg := range m.Matchers { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *QueryResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3323,29 +3394,36 @@ func (m *QueryResponse) Marshal() (dAtA []byte, err error) { } func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Timeseries) > 0 { - for _, msg := range m.Timeseries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3353,29 +3431,36 @@ func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { } func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Timeseries) > 0 { - for _, msg := range m.Timeseries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - 
return i, nil + return len(dAtA) - i, nil } func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3383,23 +3468,29 @@ func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { } func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.LabelName) > 0 { - dAtA[i] = 0xa - i++ + i -= len(m.LabelName) + copy(dAtA[i:], m.LabelName) i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelName))) - i += copy(dAtA[i:], m.LabelName) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3407,32 +3498,31 @@ func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { } func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { + for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelValues[iNdEx]) + copy(dAtA[i:], m.LabelValues[iNdEx]) + i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelNamesRequest) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3440,17 +3530,22 @@ func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { } func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3458,32 +3553,31 @@ func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { } func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { + for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelNames[iNdEx]) + copy(dAtA[i:], m.LabelNames[iNdEx]) + i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3491,17 +3585,22 @@ func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { } func (m *UserStatsRequest) MarshalTo(dAtA 
[]byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3509,40 +3608,45 @@ func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { } func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.IngestionRate != 0 { - dAtA[i] = 0x9 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) - i += 8 - } - if m.NumSeries != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.NumSeries)) + if m.RuleIngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) + i-- + dAtA[i] = 0x21 } if m.ApiIngestionRate != 0 { - dAtA[i] = 0x19 - i++ + i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) - i += 8 + i-- + dAtA[i] = 0x19 } - if m.RuleIngestionRate != 0 { - dAtA[i] = 0x21 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) - i += 8 + if m.NumSeries != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.NumSeries)) + i-- + dAtA[i] = 0x10 + } + if m.IngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) + i-- + dAtA[i] = 0x9 } - return i, nil + return len(dAtA) - i, nil } func (m *UserIDStatsResponse) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3550,33 +3654,41 @@ func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { } func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserIDStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.UserId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Data.Size())) - n1, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x12 } - return i, nil + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3584,29 +3696,36 @@ func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { } func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UsersStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3614,39 +3733,46 @@ func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { } func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.StartTimestampMs != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + if len(m.MatchersSet) > 0 { + for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchersSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } if m.EndTimestampMs != 0 { - dAtA[i] = 0x10 - i++ i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - if len(m.MatchersSet) > 0 { - for _, msg := range m.MatchersSet { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) 
{ size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3654,29 +3780,36 @@ func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { } func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Metric) > 0 { - for _, msg := range m.Metric { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3684,53 +3817,64 @@ func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { } func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeriesChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.FromIngesterId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) - i += copy(dAtA[i:], m.FromIngesterId) - } - if len(m.UserId) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] 
= 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - if len(m.Chunks) > 0 { - for _, msg := range m.Chunks { - dAtA[i] = 0x22 - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - return i, nil + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0x12 + } + if len(m.FromIngesterId) > 0 { + i -= len(m.FromIngesterId) + copy(dAtA[i:], m.FromIngesterId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *Chunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3738,38 +3882,44 @@ func (m *Chunk) Marshal() (dAtA []byte, err error) { } func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.StartTimestampMs != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - 
dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 } if m.Encoding != 0 { - dAtA[i] = 0x18 - i++ i = encodeVarintCortex(dAtA, i, uint64(m.Encoding)) + i-- + dAtA[i] = 0x18 } - if len(m.Data) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) + if m.EndTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3777,17 +3927,22 @@ func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { } func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransferChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *TimeSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3795,41 +3950,50 @@ func (m *TimeSeries) Marshal() (dAtA []byte, err error) { } func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Labels) > 0 { - for _, msg := range 
m.Labels { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Samples) > 0 { - for _, msg := range m.Samples { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelPair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3837,29 +4001,36 @@ func (m *LabelPair) Marshal() (dAtA []byte, err error) { } func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } if len(m.Value) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Value) + copy(dAtA[i:], m.Value) i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) + 
i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *Sample) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3867,28 +4038,33 @@ func (m *Sample) Marshal() (dAtA []byte, err error) { } func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Value != 0 { - dAtA[i] = 0x9 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i += 8 - } if m.TimestampMs != 0 { - dAtA[i] = 0x10 - i++ i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 } - return i, nil + return len(dAtA) - i, nil } func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3896,29 +4072,36 @@ func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { } func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Matchers) > 0 { - for _, msg := range m.Matchers { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Metric) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3926,29 +4109,36 @@ func (m *Metric) Marshal() (dAtA []byte, err error) { } func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3956,34 +4146,41 @@ func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { } func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Type != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Type)) + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a } if 
len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i-- + dAtA[i] = 0x12 } - if len(m.Value) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) + if m.Type != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -3991,41 +4188,50 @@ func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { } func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeriesFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.FromIngesterId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) - i += copy(dAtA[i:], m.FromIngesterId) - } - if len(m.UserId) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 } if len(m.Filename) > 0 { - dAtA[i] = 0x1a - i++ + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) i = encodeVarintCortex(dAtA, i, uint64(len(m.Filename))) - i += copy(dAtA[i:], m.Filename) + i-- + dAtA[i] = 0x1a } - if len(m.Data) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.FromIngesterId) > 0 { + i -= len(m.FromIngesterId) + copy(dAtA[i:], m.FromIngesterId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -4033,21 +4239,28 @@ func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { } func (m *TransferTSDBResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransferTSDBResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { + offset -= sovCortex(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *WriteRequest) Size() (n int) { if m == nil { @@ -4505,14 +4718,7 @@ func (m *TransferTSDBResponse) Size() (n int) { } func sovCortex(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozCortex(x uint64) (n int) { return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -4541,8 +4747,13 @@ func (this *ReadRequest) String() string { if this == nil { return "nil" } + repeatedStringForQueries := "[]*QueryRequest{" + for _, f := range this.Queries { + repeatedStringForQueries += strings.Replace(f.String(), "QueryRequest", "QueryRequest", 1) + "," + } + repeatedStringForQueries += "}" s := strings.Join([]string{`&ReadRequest{`, - `Queries:` + strings.Replace(fmt.Sprintf("%v", this.Queries), "QueryRequest", "QueryRequest", 1) + `,`, + `Queries:` + repeatedStringForQueries + `,`, `}`, }, "") return 
s @@ -4551,8 +4762,13 @@ func (this *ReadResponse) String() string { if this == nil { return "nil" } + repeatedStringForResults := "[]*QueryResponse{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(f.String(), "QueryResponse", "QueryResponse", 1) + "," + } + repeatedStringForResults += "}" s := strings.Join([]string{`&ReadResponse{`, - `Results:` + strings.Replace(fmt.Sprintf("%v", this.Results), "QueryResponse", "QueryResponse", 1) + `,`, + `Results:` + repeatedStringForResults + `,`, `}`, }, "") return s @@ -4561,10 +4777,15 @@ func (this *QueryRequest) String() string { if this == nil { return "nil" } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" s := strings.Join([]string{`&QueryRequest{`, `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, `}`, }, "") return s @@ -4573,8 +4794,13 @@ func (this *QueryResponse) String() string { if this == nil { return "nil" } + repeatedStringForTimeseries := "[]TimeSeries{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + "," + } + repeatedStringForTimeseries += "}" s := strings.Join([]string{`&QueryResponse{`, - `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + `,`, + `Timeseries:` + repeatedStringForTimeseries + `,`, `}`, }, "") return s @@ -4583,8 +4809,13 @@ func (this *QueryStreamResponse) String() string { if this == nil { return "nil" } + repeatedStringForTimeseries := 
"[]TimeSeriesChunk{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + "," + } + repeatedStringForTimeseries += "}" s := strings.Join([]string{`&QueryStreamResponse{`, - `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + `,`, + `Timeseries:` + repeatedStringForTimeseries + `,`, `}`, }, "") return s @@ -4656,7 +4887,7 @@ func (this *UserIDStatsResponse) String() string { } s := strings.Join([]string{`&UserIDStatsResponse{`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "UserStatsResponse", "UserStatsResponse", 1) + `,`, + `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`, `}`, }, "") return s @@ -4665,8 +4896,13 @@ func (this *UsersStatsResponse) String() string { if this == nil { return "nil" } + repeatedStringForStats := "[]*UserIDStatsResponse{" + for _, f := range this.Stats { + repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," + } + repeatedStringForStats += "}" s := strings.Join([]string{`&UsersStatsResponse{`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "UserIDStatsResponse", "UserIDStatsResponse", 1) + `,`, + `Stats:` + repeatedStringForStats + `,`, `}`, }, "") return s @@ -4675,10 +4911,15 @@ func (this *MetricsForLabelMatchersRequest) String() string { if this == nil { return "nil" } + repeatedStringForMatchersSet := "[]*LabelMatchers{" + for _, f := range this.MatchersSet { + repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + } + repeatedStringForMatchersSet += "}" s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `EndTimestampMs:` + 
fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `MatchersSet:` + strings.Replace(fmt.Sprintf("%v", this.MatchersSet), "LabelMatchers", "LabelMatchers", 1) + `,`, + `MatchersSet:` + repeatedStringForMatchersSet + `,`, `}`, }, "") return s @@ -4687,8 +4928,13 @@ func (this *MetricsForLabelMatchersResponse) String() string { if this == nil { return "nil" } + repeatedStringForMetric := "[]*Metric{" + for _, f := range this.Metric { + repeatedStringForMetric += strings.Replace(f.String(), "Metric", "Metric", 1) + "," + } + repeatedStringForMetric += "}" s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, - `Metric:` + strings.Replace(fmt.Sprintf("%v", this.Metric), "Metric", "Metric", 1) + `,`, + `Metric:` + repeatedStringForMetric + `,`, `}`, }, "") return s @@ -4697,11 +4943,16 @@ func (this *TimeSeriesChunk) String() string { if this == nil { return "nil" } + repeatedStringForChunks := "[]Chunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," + } + repeatedStringForChunks += "}" s := strings.Join([]string{`&TimeSeriesChunk{`, `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1), `&`, ``, 1) + `,`, + `Chunks:` + repeatedStringForChunks + `,`, `}`, }, "") return s @@ -4732,9 +4983,14 @@ func (this *TimeSeries) String() string { if this == nil { return "nil" } + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," + } + repeatedStringForSamples += "}" s := strings.Join([]string{`&TimeSeries{`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, + `Samples:` + repeatedStringForSamples + `,`, `}`, }, "") return s @@ -4765,8 +5021,13 @@ func (this *LabelMatchers) String() string { if this == nil { return "nil" } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" s := strings.Join([]string{`&LabelMatchers{`, - `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, `}`, }, "") return s diff --git a/pkg/ingester/wal.pb.go b/pkg/ingester/wal.pb.go index c3fcc09600..31970266ed 100644 --- a/pkg/ingester/wal.pb.go +++ b/pkg/ingester/wal.pb.go @@ -12,6 +12,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" ) @@ -25,7 +26,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Record struct { UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` @@ -46,7 +47,7 @@ func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Record.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -104,7 +105,7 @@ func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Labels.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -149,7 +150,7 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Sample.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -209,7 +210,7 @@ func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Series.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -510,7 +511,7 @@ func valueToGoStringWal(v interface{}, typ string) string { func (m *Record) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -518,47 +519,57 @@ func (m *Record) Marshal() (dAtA []byte, err error) { } func (m *Record) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l 
- if len(m.UserId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0x12 - i++ - i = encodeVarintWal(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.Samples) > 0 { - for _, msg := range m.Samples { - dAtA[i] = 0x1a - i++ - i = encodeVarintWal(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *Labels) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -566,34 +577,41 @@ func (m *Labels) Marshal() (dAtA []byte, err error) { } func (m *Labels) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Fingerprint != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) - } if len(m.Labels) > 0 { - for _, msg := range m.Labels { - 
dAtA[i] = 0x12 - i++ - i = encodeVarintWal(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWal(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + if m.Fingerprint != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *Sample) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -601,33 +619,38 @@ func (m *Sample) Marshal() (dAtA []byte, err error) { } func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Fingerprint != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x19 } if m.Timestamp != 0 { - dAtA[i] = 0x10 - i++ i = encodeVarintWal(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 } - if m.Value != 0 { - dAtA[i] = 0x19 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i += 8 + if m.Fingerprint != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Series) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -635,56 +658,68 @@ func 
(m *Series) Marshal() (dAtA []byte, err error) { } func (m *Series) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.UserId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } - if m.Fingerprint != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) - } - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0x1a - i++ - i = encodeVarintWal(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - if len(m.Chunks) > 0 { - for _, msg := range m.Chunks { - dAtA[i] = 0x22 - i++ - i = encodeVarintWal(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWal(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - return i, nil + if m.Fingerprint != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x10 + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func encodeVarintWal(dAtA []byte, offset int, v uint64) int { + offset -= sovWal(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } 
dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Record) Size() (n int) { if m == nil { @@ -776,14 +811,7 @@ func (m *Series) Size() (n int) { } func sovWal(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozWal(x uint64) (n int) { return sovWal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -792,10 +820,20 @@ func (this *Record) String() string { if this == nil { return "nil" } + repeatedStringForLabels := "[]Labels{" + for _, f := range this.Labels { + repeatedStringForLabels += strings.Replace(strings.Replace(f.String(), "Labels", "Labels", 1), `&`, ``, 1) + "," + } + repeatedStringForLabels += "}" + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," + } + repeatedStringForSamples += "}" s := strings.Join([]string{`&Record{`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1), `&`, ``, 1) + `,`, - `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, + `Labels:` + repeatedStringForLabels + `,`, + `Samples:` + repeatedStringForSamples + `,`, `}`, }, "") return s @@ -827,11 +865,16 @@ func (this *Series) String() string { if this == nil { return "nil" } + repeatedStringForChunks := "[]Chunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += fmt.Sprintf("%v", f) + "," + } + repeatedStringForChunks += "}" s := strings.Join([]string{`&Series{`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, `Fingerprint:` + fmt.Sprintf("%v", this.Fingerprint) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "client.Chunk", 1), `&`, ``, 1) + `,`, + `Chunks:` + 
repeatedStringForChunks + `,`, `}`, }, "") return s diff --git a/pkg/querier/frontend/frontend.pb.go b/pkg/querier/frontend/frontend.pb.go index 97b11376f6..ee7a958573 100644 --- a/pkg/querier/frontend/frontend.pb.go +++ b/pkg/querier/frontend/frontend.pb.go @@ -10,8 +10,11 @@ import ( proto "github.com/gogo/protobuf/proto" httpgrpc "github.com/weaveworks/common/httpgrpc" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" ) @@ -25,7 +28,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type ProcessRequest struct { HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,1,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` @@ -44,7 +47,7 @@ func (m *ProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_ProcessRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -87,7 +90,7 @@ func (m *ProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_ProcessResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -282,6 +285,14 @@ type FrontendServer interface { Process(Frontend_ProcessServer) error } +// UnimplementedFrontendServer can be embedded to have forward compatible implementations. 
+type UnimplementedFrontendServer struct { +} + +func (*UnimplementedFrontendServer) Process(srv Frontend_ProcessServer) error { + return status.Errorf(codes.Unimplemented, "method Process not implemented") +} + func RegisterFrontendServer(s *grpc.Server, srv FrontendServer) { s.RegisterService(&_Frontend_serviceDesc, srv) } @@ -330,7 +341,7 @@ var _Frontend_serviceDesc = grpc.ServiceDesc{ func (m *ProcessRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -338,27 +349,34 @@ func (m *ProcessRequest) Marshal() (dAtA []byte, err error) { } func (m *ProcessRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HttpRequest != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintFrontend(dAtA, i, uint64(m.HttpRequest.Size())) - n1, err := m.HttpRequest.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ProcessResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -366,31 +384,40 @@ func (m *ProcessResponse) Marshal() (dAtA []byte, err error) { } func (m *ProcessResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HttpResponse != nil { - dAtA[i] = 
0xa - i++ - i = encodeVarintFrontend(dAtA, i, uint64(m.HttpResponse.Size())) - n2, err := m.HttpResponse.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HttpResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintFrontend(dAtA []byte, offset int, v uint64) int { + offset -= sovFrontend(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ProcessRequest) Size() (n int) { if m == nil { @@ -419,14 +446,7 @@ func (m *ProcessResponse) Size() (n int) { } func sovFrontend(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozFrontend(x uint64) (n int) { return sovFrontend(uint64((x << 1) ^ uint64((int64(x) >> 63)))) diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index b355ce69ba..431ce23805 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -14,6 +14,7 @@ import ( _ "github.com/golang/protobuf/ptypes/duration" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" time "time" @@ -29,7 +30,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PrometheusRequest struct { Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` @@ -53,7 +54,7 @@ func (m *PrometheusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_PrometheusRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -132,7 +133,7 @@ func (m *PrometheusResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([] return xxx_messageInfo_PrometheusResponseHeader.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -186,7 +187,7 @@ func (m *PrometheusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_PrometheusResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -258,7 +259,7 @@ func (m *PrometheusData) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_PrometheusData.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -309,7 +310,7 @@ func (m *SampleStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_SampleStream.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -354,7 +355,7 @@ func (m *CachedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_CachedResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err 
!= nil { return nil, err } @@ -407,7 +408,7 @@ func (m *Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Extent.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -885,7 +886,7 @@ func valueToGoStringQueryrange(v interface{}, typ string) string { func (m *PrometheusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -893,52 +894,59 @@ func (m *PrometheusRequest) Marshal() (dAtA []byte, err error) { } func (m *PrometheusRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrometheusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Path) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - } - if m.Start != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x32 } - if m.End != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) + n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Timeout, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Timeout):]) + if err1 != nil { + return 0, err1 } + i -= n1 + i = encodeVarintQueryrange(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x2a if m.Step != 0 { - dAtA[i] = 0x20 - i++ i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x20 } - dAtA[i] = 0x2a - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Timeout))) - n1, err := 
github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Timeout, dAtA[i:]) - if err != nil { - return 0, err + if m.End != 0 { + i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x18 } - i += n1 - if len(m.Query) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Query))) - i += copy(dAtA[i:], m.Query) + if m.Start != 0 { + i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PrometheusResponseHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -946,38 +954,38 @@ func (m *PrometheusResponseHeader) Marshal() (dAtA []byte, err error) { } func (m *PrometheusResponseHeader) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrometheusResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } if len(m.Values) > 0 { - for _, s := range m.Values { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil 
} func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -985,55 +993,67 @@ func (m *PrometheusResponse) Marshal() (dAtA []byte, err error) { } func (m *PrometheusResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrometheusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Status) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - dAtA[i] = 0x12 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 } - i += n2 if len(m.ErrorType) > 0 { - dAtA[i] = 0x1a - i++ + i -= len(m.ErrorType) + copy(dAtA[i:], m.ErrorType) i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ErrorType))) - i += copy(dAtA[i:], m.ErrorType) - } - if len(m.Error) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) + i-- + dAtA[i] = 0x1a } - if len(m.Headers) > 0 { - for _, msg := range m.Headers { - dAtA[i] = 0x2a - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PrometheusData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1041,35 +1061,43 @@ func (m *PrometheusData) Marshal() (dAtA []byte, err error) { } func (m *PrometheusData) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrometheusData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResultType) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ResultType))) - i += copy(dAtA[i:], m.ResultType) - } if len(m.Result) > 0 { - for _, msg := range m.Result { - dAtA[i] = 0x12 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Result) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Result[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.ResultType) > 0 { + i -= len(m.ResultType) + copy(dAtA[i:], m.ResultType) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ResultType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *SampleStream) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1077,41 +1105,50 @@ func (m *SampleStream) Marshal() (dAtA 
[]byte, err error) { } func (m *SampleStream) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0xa - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Samples) > 0 { - for _, msg := range m.Samples { - dAtA[i] = 0x12 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintQueryrange(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CachedResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1119,35 +1156,43 @@ func (m *CachedResponse) Marshal() (dAtA []byte, err error) { } func (m *CachedResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintQueryrange(dAtA, i, 
uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } if len(m.Extents) > 0 { - for _, msg := range m.Extents { - dAtA[i] = 0x12 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Extents) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Extents[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *Extent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1155,47 +1200,57 @@ func (m *Extent) Marshal() (dAtA []byte, err error) { } func (m *Extent) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Extent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Start != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) - } - if m.End != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } if len(m.TraceId) > 0 { - dAtA[i] = 0x22 - i++ + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TraceId))) - i += copy(dAtA[i:], m.TraceId) + i-- + dAtA[i] = 0x22 } - if m.Response != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintQueryrange(dAtA, i, uint64(m.Response.Size())) - n3, 
err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 + if m.End != 0 { + i = encodeVarintQueryrange(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 } - return i, nil + if m.Start != 0 { + i = encodeVarintQueryrange(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int { + offset -= sovQueryrange(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PrometheusRequest) Size() (n int) { if m == nil { @@ -1356,14 +1411,7 @@ func (m *Extent) Size() (n int) { } func sovQueryrange(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozQueryrange(x uint64) (n int) { return sovQueryrange(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1377,7 +1425,7 @@ func (this *PrometheusRequest) String() string { `Start:` + fmt.Sprintf("%v", this.Start) + `,`, `End:` + fmt.Sprintf("%v", this.End) + `,`, `Step:` + fmt.Sprintf("%v", this.Step) + `,`, - `Timeout:` + strings.Replace(strings.Replace(this.Timeout.String(), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `Timeout:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, `}`, }, "") @@ -1398,12 +1446,17 @@ func (this *PrometheusResponse) String() string { if this == nil { return "nil" } + repeatedStringForHeaders := "[]*PrometheusResponseHeader{" + for _, f := range this.Headers { + repeatedStringForHeaders += strings.Replace(f.String(), "PrometheusResponseHeader", "PrometheusResponseHeader", 1) + "," + } + repeatedStringForHeaders += "}" s := strings.Join([]string{`&PrometheusResponse{`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Data:` + 
strings.Replace(strings.Replace(this.Data.String(), "PrometheusData", "PrometheusData", 1), `&`, ``, 1) + `,`, `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Headers:` + strings.Replace(fmt.Sprintf("%v", this.Headers), "PrometheusResponseHeader", "PrometheusResponseHeader", 1) + `,`, + `Headers:` + repeatedStringForHeaders + `,`, `}`, }, "") return s @@ -1412,9 +1465,14 @@ func (this *PrometheusData) String() string { if this == nil { return "nil" } + repeatedStringForResult := "[]SampleStream{" + for _, f := range this.Result { + repeatedStringForResult += strings.Replace(strings.Replace(f.String(), "SampleStream", "SampleStream", 1), `&`, ``, 1) + "," + } + repeatedStringForResult += "}" s := strings.Join([]string{`&PrometheusData{`, `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`, - `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "SampleStream", "SampleStream", 1), `&`, ``, 1) + `,`, + `Result:` + repeatedStringForResult + `,`, `}`, }, "") return s @@ -1423,9 +1481,14 @@ func (this *SampleStream) String() string { if this == nil { return "nil" } + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSamples += "}" s := strings.Join([]string{`&SampleStream{`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "client.Sample", 1), `&`, ``, 1) + `,`, + `Samples:` + repeatedStringForSamples + `,`, `}`, }, "") return s @@ -1434,9 +1497,14 @@ func (this *CachedResponse) String() string { if this == nil { return "nil" } + repeatedStringForExtents := "[]Extent{" + for _, f := range this.Extents { + repeatedStringForExtents += strings.Replace(strings.Replace(f.String(), "Extent", "Extent", 1), `&`, ``, 1) + "," + } + repeatedStringForExtents += "}" s := 
strings.Join([]string{`&CachedResponse{`, `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Extents:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Extents), "Extent", "Extent", 1), `&`, ``, 1) + `,`, + `Extents:` + repeatedStringForExtents + `,`, `}`, }, "") return s diff --git a/pkg/ring/ring.pb.go b/pkg/ring/ring.pb.go index 72e40d40e0..62e3ad152a 100644 --- a/pkg/ring/ring.pb.go +++ b/pkg/ring/ring.pb.go @@ -10,6 +10,7 @@ import ( github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" io "io" math "math" + math_bits "math/bits" reflect "reflect" strconv "strconv" strings "strings" @@ -24,7 +25,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type IngesterState int32 @@ -75,7 +76,7 @@ func (m *Desc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Desc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -121,7 +122,7 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_IngesterDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -328,7 +329,7 @@ func valueToGoStringRing(v interface{}, typ string) string { func (m *Desc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -336,43 +337,46 @@ func (m *Desc) Marshal() (dAtA []byte, err error) { } func (m *Desc) MarshalTo(dAtA []byte) (int, error) { - var i int 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Desc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Ingesters) > 0 { - for k, _ := range m.Ingesters { - dAtA[i] = 0xa - i++ + for k := range m.Ingesters { v := m.Ingesters[k] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovRing(uint64(msgSize)) + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRing(dAtA, i, uint64(size)) } - mapSize := 1 + len(k) + sovRing(uint64(len(k))) + msgSize - i = encodeVarintRing(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintRing(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintRing(dAtA, i, uint64((&v).Size())) - n1, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRing(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRing(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *IngesterDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -380,26 +384,15 @@ func (m *IngesterDesc) Marshal() (dAtA []byte, err error) { } func (m *IngesterDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngesterDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Addr) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRing(dAtA, i, uint64(len(m.Addr))) - i += copy(dAtA[i:], m.Addr) - } - if m.Timestamp != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRing(dAtA, i, uint64(m.Timestamp)) - } - if m.State != 0 { - dAtA[i] = 0x18 - i++ 
- i = encodeVarintRing(dAtA, i, uint64(m.State)) - } if len(m.Tokens) > 0 { dAtA3 := make([]byte, len(m.Tokens)*10) var j2 int @@ -412,22 +405,42 @@ func (m *IngesterDesc) MarshalTo(dAtA []byte) (int, error) { dAtA3[j2] = uint8(num) j2++ } - dAtA[i] = 0x32 - i++ + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) i = encodeVarintRing(dAtA, i, uint64(j2)) - i += copy(dAtA[i:], dAtA3[:j2]) + i-- + dAtA[i] = 0x32 + } + if m.State != 0 { + i = encodeVarintRing(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 } - return i, nil + if m.Timestamp != 0 { + i = encodeVarintRing(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.Addr) > 0 { + i -= len(m.Addr) + copy(dAtA[i:], m.Addr) + i = encodeVarintRing(dAtA, i, uint64(len(m.Addr))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func encodeVarintRing(dAtA []byte, offset int, v uint64) int { + offset -= sovRing(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Desc) Size() (n int) { if m == nil { @@ -474,14 +487,7 @@ func (m *IngesterDesc) Size() (n int) { } func sovRing(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozRing(x uint64) (n int) { return sovRing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) diff --git a/pkg/ruler/rules/rules.pb.go b/pkg/ruler/rules/rules.pb.go index b053536520..5c732f4863 100644 --- a/pkg/ruler/rules/rules.pb.go +++ b/pkg/ruler/rules/rules.pb.go @@ -15,6 +15,7 @@ import ( _ "github.com/golang/protobuf/ptypes/timestamp" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" time "time" @@ -30,7 +31,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // RuleGroupDesc is a proto representation of a cortex rule group type RuleGroupDesc struct { @@ -55,7 +56,7 @@ func (m *RuleGroupDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_RuleGroupDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -143,7 +144,7 @@ func (m *RuleDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RuleDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -243,7 +244,7 @@ func (m *AlertDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AlertDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -599,7 +600,7 @@ func valueToGoStringRules(v interface{}, typ string) string { func (m *RuleGroupDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -607,65 +608,75 @@ func (m *RuleGroupDesc) Marshal() (dAtA []byte, err error) { } func (m *RuleGroupDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Namespace) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.Namespace))) - i += 
copy(dAtA[i:], m.Namespace) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval))) - n1, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Interval, dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x22 - i++ - i = encodeVarintRules(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintRules(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x32 } if m.Deleted { - dAtA[i] = 0x28 - i++ + i-- if m.Deleted { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x28 } - if len(m.User) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Interval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintRules(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x1a + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintRules(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 } - return i, nil + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRules(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *RuleDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err 
!= nil { return nil, err } @@ -673,97 +684,114 @@ func (m *RuleDesc) Marshal() (dAtA []byte, err error) { } func (m *RuleDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Expr) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.Expr))) - i += copy(dAtA[i:], m.Expr) + if len(m.Alerts) > 0 { + for iNdEx := len(m.Alerts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Alerts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } } - if len(m.Record) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.Record))) - i += copy(dAtA[i:], m.Record) + if len(m.LastError) > 0 { + i -= len(m.LastError) + copy(dAtA[i:], m.LastError) + i = encodeVarintRules(dAtA, i, uint64(len(m.LastError))) + i-- + dAtA[i] = 0x4a } - if len(m.Alert) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.Alert))) - i += copy(dAtA[i:], m.Alert) + if len(m.Health) > 0 { + i -= len(m.Health) + copy(dAtA[i:], m.Health) + i = encodeVarintRules(dAtA, i, uint64(len(m.Health))) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x22 - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.For))) - n2, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i:]) - if err != nil { - return 0, err + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintRules(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x3a } - i += n2 - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0x2a - i++ - i = encodeVarintRules(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Annotations) > 0 { + for iNdEx := 
len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Annotations[iNdEx].Size() + i -= size + if _, err := m.Annotations[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - if len(m.Annotations) > 0 { - for _, msg := range m.Annotations { - dAtA[i] = 0x32 - i++ - i = encodeVarintRules(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.State) > 0 { - dAtA[i] = 0x3a - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) + n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) + if err2 != nil { + return 0, err2 } - if len(m.Health) > 0 { - dAtA[i] = 0x42 - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.Health))) - i += copy(dAtA[i:], m.Health) + i -= n2 + i = encodeVarintRules(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x22 + if len(m.Alert) > 0 { + i -= len(m.Alert) + copy(dAtA[i:], m.Alert) + i = encodeVarintRules(dAtA, i, uint64(len(m.Alert))) + i-- + dAtA[i] = 0x1a } - if len(m.LastError) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.LastError))) - i += copy(dAtA[i:], m.LastError) + if len(m.Record) > 0 { + i -= len(m.Record) + copy(dAtA[i:], m.Record) + i = encodeVarintRules(dAtA, i, uint64(len(m.Record))) + i-- + dAtA[i] = 0x12 } - if len(m.Alerts) > 0 { - for _, msg := range m.Alerts { - dAtA[i] = 0x52 - i++ - i = encodeVarintRules(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if len(m.Expr) 
> 0 { + i -= len(m.Expr) + copy(dAtA[i:], m.Expr) + i = encodeVarintRules(dAtA, i, uint64(len(m.Expr))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *AlertDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -771,97 +799,109 @@ func (m *AlertDesc) Marshal() (dAtA []byte, err error) { } func (m *AlertDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlertDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.State) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRules(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - } - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0x12 - i++ - i = encodeVarintRules(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ValidUntil, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ValidUntil):]) + if err3 != nil { + return 0, err3 } - if len(m.Annotations) > 0 { - for _, msg := range m.Annotations { - dAtA[i] = 0x1a - i++ - i = encodeVarintRules(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= n3 + i = encodeVarintRules(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x4a + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastSentAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastSentAt):]) + if err4 != nil { + return 0, err4 } - if m.Value != 0 { - dAtA[i] = 0x21 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i += 8 + i -= n4 + i = encodeVarintRules(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x42 + n5, err5 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ResolvedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ResolvedAt):]) + if err5 != nil { + return 0, err5 } - dAtA[i] = 0x2a - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ActiveAt))) - n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ActiveAt, dAtA[i:]) - if err != nil { - return 0, err + i -= n5 + i = encodeVarintRules(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x3a + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.FiredAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.FiredAt):]) + if err6 != nil { + return 0, err6 } - i += n3 + i -= n6 + i = encodeVarintRules(dAtA, i, uint64(n6)) + i-- dAtA[i] = 0x32 - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.FiredAt))) - n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.FiredAt, dAtA[i:]) - if err != nil { - return 0, err + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ActiveAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ActiveAt):]) + if err7 != nil { + return 0, err7 } - i += n4 - dAtA[i] = 0x3a - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ResolvedAt))) - n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ResolvedAt, dAtA[i:]) - if err != nil { - return 0, err + i -= n7 + i = encodeVarintRules(dAtA, i, uint64(n7)) + i-- + dAtA[i] = 0x2a + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x21 } - i += n5 - dAtA[i] = 0x42 - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.LastSentAt))) - n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastSentAt, dAtA[i:]) - if err != nil { - return 0, err + if len(m.Annotations) > 0 { + for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + { + size := 
m.Annotations[iNdEx].Size() + i -= size + if _, err := m.Annotations[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - i += n6 - dAtA[i] = 0x4a - i++ - i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ValidUntil))) - n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ValidUntil, dAtA[i:]) - if err != nil { - return 0, err + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintRules(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0xa } - i += n7 - return i, nil + return len(dAtA) - i, nil } func encodeVarintRules(dAtA []byte, offset int, v uint64) int { + offset -= sovRules(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RuleGroupDesc) Size() (n int) { if m == nil { @@ -987,14 +1027,7 @@ func (m *AlertDesc) Size() (n int) { } func sovRules(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozRules(x uint64) (n int) { return sovRules(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1003,11 +1036,16 @@ func (this *RuleGroupDesc) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]*RuleDesc{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(f.String(), "RuleDesc", "RuleDesc", 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&RuleGroupDesc{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + 
`,`, - `Interval:` + strings.Replace(strings.Replace(this.Interval.String(), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleDesc", "RuleDesc", 1) + `,`, + `Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `Deleted:` + fmt.Sprintf("%v", this.Deleted) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `}`, @@ -1018,17 +1056,22 @@ func (this *RuleDesc) String() string { if this == nil { return "nil" } + repeatedStringForAlerts := "[]*AlertDesc{" + for _, f := range this.Alerts { + repeatedStringForAlerts += strings.Replace(f.String(), "AlertDesc", "AlertDesc", 1) + "," + } + repeatedStringForAlerts += "}" s := strings.Join([]string{`&RuleDesc{`, `Expr:` + fmt.Sprintf("%v", this.Expr) + `,`, `Record:` + fmt.Sprintf("%v", this.Record) + `,`, `Alert:` + fmt.Sprintf("%v", this.Alert) + `,`, - `For:` + strings.Replace(strings.Replace(this.For.String(), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `Health:` + fmt.Sprintf("%v", this.Health) + `,`, `LastError:` + fmt.Sprintf("%v", this.LastError) + `,`, - `Alerts:` + strings.Replace(fmt.Sprintf("%v", this.Alerts), "AlertDesc", "AlertDesc", 1) + `,`, + `Alerts:` + repeatedStringForAlerts + `,`, `}`, }, "") return s @@ -1042,11 +1085,11 @@ func (this *AlertDesc) String() string { `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ActiveAt:` + strings.Replace(strings.Replace(this.ActiveAt.String(), "Timestamp", 
"timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `FiredAt:` + strings.Replace(strings.Replace(this.FiredAt.String(), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `ResolvedAt:` + strings.Replace(strings.Replace(this.ResolvedAt.String(), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `LastSentAt:` + strings.Replace(strings.Replace(this.LastSentAt.String(), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, - `ValidUntil:` + strings.Replace(strings.Replace(this.ValidUntil.String(), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `ActiveAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActiveAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `FiredAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FiredAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `ResolvedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResolvedAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `LastSentAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastSentAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `ValidUntil:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ValidUntil), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, `}`, }, "") return s From 2744cbb1512855152b64674e07fab9f8990a5a01 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 30 Jan 2020 14:05:40 -0500 Subject: [PATCH 02/20] Update changelog Signed-off-by: Cyril Tovena --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9df155d04c..79b9714bc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## master / unreleased +* [CHANGE] Upgraded gogo/protobuf tool chain from 1.2.1 to 1.3.0. #2055 * [CHANGE] Config file changed to remove top level `config_store` field in favor of a nested `configdb` field. 
#2125 * [CHANGE] Removed unnecessary `frontend.cache-split-interval` in favor of `querier.split-queries-by-interval` both to reduce configuration complexity and guarantee alignment of these two configs. Starting from now, `-querier.cache-results` may only be enabled in conjunction with `-querier.split-queries-by-interval` (previously the cache interval default was `24h` so if you want to preserve the same behaviour you should set `-querier.split-queries-by-interval=24h`). #2040 * [CHANGE] Removed remaining support for using denormalised tokens in the ring. If you're still running ingesters with denormalised tokens (Cortex 0.4 or earlier, with `-ingester.normalise-tokens=false`), such ingesters will now be completely invisible to distributors and need to be either switched to Cortex 0.6.0 or later, or be configured to use normalised tokens. #2034 From cd48c6ac8c967404d89c846dded753b55fd5fbdf Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 31 Jan 2020 13:47:02 -0500 Subject: [PATCH 03/20] Adding compatibility tests, found an issue Signed-off-by: Cyril Tovena --- integration-tests/proto/1.2.1/cortex.pb.go | 7706 ++++++++++++++++++ integration-tests/proto/1.2.1/timeseries.go | 256 + integration-tests/proto/1.3.0/cortex.pb.go | 7967 +++++++++++++++++++ integration-tests/proto/1.3.0/timeseries.go | 256 + integration-tests/proto/proto_test.go | 246 + 5 files changed, 16431 insertions(+) create mode 100644 integration-tests/proto/1.2.1/cortex.pb.go create mode 100644 integration-tests/proto/1.2.1/timeseries.go create mode 100644 integration-tests/proto/1.3.0/cortex.pb.go create mode 100644 integration-tests/proto/1.3.0/timeseries.go create mode 100644 integration-tests/proto/proto_test.go diff --git a/integration-tests/proto/1.2.1/cortex.pb.go b/integration-tests/proto/1.2.1/cortex.pb.go new file mode 100644 index 0000000000..08b63a9016 --- /dev/null +++ b/integration-tests/proto/1.2.1/cortex.pb.go @@ -0,0 +1,7706 @@ +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: cortex.proto + +package client + +import ( + bytes "bytes" + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + io "io" + math "math" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type MatchType int32 + +const ( + EQUAL MatchType = 0 + NOT_EQUAL MatchType = 1 + REGEX_MATCH MatchType = 2 + REGEX_NO_MATCH MatchType = 3 +) + +var MatchType_name = map[int32]string{ + 0: "EQUAL", + 1: "NOT_EQUAL", + 2: "REGEX_MATCH", + 3: "REGEX_NO_MATCH", +} + +var MatchType_value = map[string]int32{ + "EQUAL": 0, + "NOT_EQUAL": 1, + "REGEX_MATCH": 2, + "REGEX_NO_MATCH": 3, +} + +func (MatchType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0} +} + +type WriteRequest_SourceEnum int32 + +const ( + API WriteRequest_SourceEnum = 0 + RULE WriteRequest_SourceEnum = 1 +) + +var WriteRequest_SourceEnum_name = map[int32]string{ + 0: "API", + 1: "RULE", +} + +var WriteRequest_SourceEnum_value = map[string]int32{ + "API": 0, + "RULE": 1, +} + +func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0, 0} +} + +type WriteRequest struct { + Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` + Source WriteRequest_SourceEnum 
`protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0} +} +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { + if m != nil { + return m.Source + } + return API +} + +type WriteResponse struct { +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{1} +} +func (m *WriteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponse.Merge(m, src) +} +func (m *WriteResponse) XXX_Size() int { + return m.Size() +} +func (m *WriteResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_WriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteResponse proto.InternalMessageInfo + +type ReadRequest struct { + Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{2} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetQueries() []*QueryRequest { + if m != nil { + return m.Queries + } + return nil +} + +type ReadResponse struct { + Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{3} +} +func (m *ReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetResults() []*QueryResponse { + if m != nil { + return m.Results + } + return nil +} + +type QueryRequest struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (m *QueryRequest) Reset() { *m = QueryRequest{} } +func (*QueryRequest) ProtoMessage() {} +func (*QueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{4} +} +func (m *QueryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRequest.Merge(m, src) +} +func (m *QueryRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRequest proto.InternalMessageInfo + +func (m *QueryRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *QueryRequest) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *QueryRequest) GetMatchers() 
[]*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +type QueryResponse struct { + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` +} + +func (m *QueryResponse) Reset() { *m = QueryResponse{} } +func (*QueryResponse) ProtoMessage() {} +func (*QueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{5} +} +func (m *QueryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResponse.Merge(m, src) +} +func (m *QueryResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResponse proto.InternalMessageInfo + +func (m *QueryResponse) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// QueryStreamResponse contains a batch of timeseries chunks. 
+type QueryStreamResponse struct { + Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` +} + +func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } +func (*QueryStreamResponse) ProtoMessage() {} +func (*QueryStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{6} +} +func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStreamResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStreamResponse.Merge(m, src) +} +func (m *QueryStreamResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStreamResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStreamResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStreamResponse proto.InternalMessageInfo + +func (m *QueryStreamResponse) GetTimeseries() []TimeSeriesChunk { + if m != nil { + return m.Timeseries + } + return nil +} + +type LabelValuesRequest struct { + LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` +} + +func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } +func (*LabelValuesRequest) ProtoMessage() {} +func (*LabelValuesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{7} +} +func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, 
err + } + return b[:n], nil + } +} +func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesRequest.Merge(m, src) +} +func (m *LabelValuesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo + +func (m *LabelValuesRequest) GetLabelName() string { + if m != nil { + return m.LabelName + } + return "" +} + +type LabelValuesResponse struct { + LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` +} + +func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } +func (*LabelValuesResponse) ProtoMessage() {} +func (*LabelValuesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{8} +} +func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesResponse.Merge(m, src) +} +func (m *LabelValuesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo + +func (m *LabelValuesResponse) GetLabelValues() []string { + if m != nil { + return m.LabelValues + } + return nil +} + +type LabelNamesRequest struct { +} + +func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } +func (*LabelNamesRequest) ProtoMessage() {} +func (*LabelNamesRequest) Descriptor() ([]byte, []int) { + 
return fileDescriptor_893a47d0a749d749, []int{9} +} +func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesRequest.Merge(m, src) +} +func (m *LabelNamesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo + +type LabelNamesResponse struct { + LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` +} + +func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } +func (*LabelNamesResponse) ProtoMessage() {} +func (*LabelNamesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{10} +} +func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesResponse.Merge(m, src) +} +func (m *LabelNamesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo + +func (m *LabelNamesResponse) GetLabelNames() []string 
{ + if m != nil { + return m.LabelNames + } + return nil +} + +type UserStatsRequest struct { +} + +func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } +func (*UserStatsRequest) ProtoMessage() {} +func (*UserStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{11} +} +func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserStatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserStatsRequest.Merge(m, src) +} +func (m *UserStatsRequest) XXX_Size() int { + return m.Size() +} +func (m *UserStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UserStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UserStatsRequest proto.InternalMessageInfo + +type UserStatsResponse struct { + IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate,proto3" json:"ingestion_rate,omitempty"` + NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries,proto3" json:"num_series,omitempty"` + ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` + RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` +} + +func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } +func (*UserStatsResponse) ProtoMessage() {} +func (*UserStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{12} +} +func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserStatsResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserStatsResponse.Merge(m, src) +} +func (m *UserStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *UserStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UserStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UserStatsResponse proto.InternalMessageInfo + +func (m *UserStatsResponse) GetIngestionRate() float64 { + if m != nil { + return m.IngestionRate + } + return 0 +} + +func (m *UserStatsResponse) GetNumSeries() uint64 { + if m != nil { + return m.NumSeries + } + return 0 +} + +func (m *UserStatsResponse) GetApiIngestionRate() float64 { + if m != nil { + return m.ApiIngestionRate + } + return 0 +} + +func (m *UserStatsResponse) GetRuleIngestionRate() float64 { + if m != nil { + return m.RuleIngestionRate + } + return 0 +} + +type UserIDStatsResponse struct { + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } +func (*UserIDStatsResponse) ProtoMessage() {} +func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{13} +} +func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserIDStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserIDStatsResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_UserIDStatsResponse.Merge(m, src) +} +func (m *UserIDStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *UserIDStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UserIDStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UserIDStatsResponse proto.InternalMessageInfo + +func (m *UserIDStatsResponse) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *UserIDStatsResponse) GetData() *UserStatsResponse { + if m != nil { + return m.Data + } + return nil +} + +type UsersStatsResponse struct { + Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` +} + +func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } +func (*UsersStatsResponse) ProtoMessage() {} +func (*UsersStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{14} +} +func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UsersStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UsersStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsersStatsResponse.Merge(m, src) +} +func (m *UsersStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *UsersStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UsersStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UsersStatsResponse proto.InternalMessageInfo + +func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse { + if m != nil { + return m.Stats + } + return nil +} + +type MetricsForLabelMatchersRequest struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs 
int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` +} + +func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } +func (*MetricsForLabelMatchersRequest) ProtoMessage() {} +func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{15} +} +func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricsForLabelMatchersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src) +} +func (m *MetricsForLabelMatchersRequest) XXX_Size() int { + return m.Size() +} +func (m *MetricsForLabelMatchersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsForLabelMatchersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsForLabelMatchersRequest proto.InternalMessageInfo + +func (m *MetricsForLabelMatchersRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *MetricsForLabelMatchersRequest) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { + if m != nil { + return m.MatchersSet + } + return nil +} + +type MetricsForLabelMatchersResponse struct { + Metric []*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` +} + +func (m *MetricsForLabelMatchersResponse) Reset() { *m = 
MetricsForLabelMatchersResponse{} } +func (*MetricsForLabelMatchersResponse) ProtoMessage() {} +func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{16} +} +func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricsForLabelMatchersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src) +} +func (m *MetricsForLabelMatchersResponse) XXX_Size() int { + return m.Size() +} +func (m *MetricsForLabelMatchersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsForLabelMatchersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsForLabelMatchersResponse proto.InternalMessageInfo + +func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +type TimeSeriesChunk struct { + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Labels []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` + Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` +} + +func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } +func (*TimeSeriesChunk) ProtoMessage() {} +func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{17} +} +func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesChunk) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesChunk.Merge(m, src) +} +func (m *TimeSeriesChunk) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesChunk) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo + +func (m *TimeSeriesChunk) GetFromIngesterId() string { + if m != nil { + return m.FromIngesterId + } + return "" +} + +func (m *TimeSeriesChunk) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *TimeSeriesChunk) GetChunks() []Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +type Chunk struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Encoding int32 `protobuf:"varint,3,opt,name=encoding,proto3" json:"encoding,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{18} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +func (m *Chunk) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *Chunk) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *Chunk) GetEncoding() int32 { + if m != nil { + return m.Encoding + } + return 0 +} + +func (m *Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type TransferChunksResponse struct { +} + +func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } +func (*TransferChunksResponse) ProtoMessage() {} +func (*TransferChunksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{19} +} +func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferChunksResponse.Merge(m, src) +} +func (m *TransferChunksResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferChunksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo + +type TimeSeries struct { + Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` + // Sorted by time, oldest sample first. 
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{20} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +type LabelPair struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{21} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) 
XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LabelPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type Sample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{22} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type LabelMatchers struct { + Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } +func (*LabelMatchers) ProtoMessage() {} +func (*LabelMatchers) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{23} +} +func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) 
+} +func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatchers) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatchers.Merge(m, src) +} +func (m *LabelMatchers) XXX_Size() int { + return m.Size() +} +func (m *LabelMatchers) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatchers.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatchers proto.InternalMessageInfo + +func (m *LabelMatchers) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +type Metric struct { + Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{24} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +type LabelMatcher struct { + Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" 
json:"value,omitempty"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{25} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(m, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo + +func (m *LabelMatcher) GetType() MatchType { + if m != nil { + return m.Type + } + return EQUAL +} + +func (m *LabelMatcher) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelMatcher) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type TimeSeriesFile struct { + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } +func (*TimeSeriesFile) ProtoMessage() {} +func (*TimeSeriesFile) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{26} +} +func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesFile) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesFile.Merge(m, src) +} +func (m *TimeSeriesFile) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesFile) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesFile.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesFile proto.InternalMessageInfo + +func (m *TimeSeriesFile) GetFromIngesterId() string { + if m != nil { + return m.FromIngesterId + } + return "" +} + +func (m *TimeSeriesFile) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *TimeSeriesFile) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *TimeSeriesFile) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type TransferTSDBResponse struct { +} + +func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } +func (*TransferTSDBResponse) ProtoMessage() {} +func (*TransferTSDBResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{27} +} +func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferTSDBResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferTSDBResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransferTSDBResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferTSDBResponse.Merge(m, src) +} +func (m *TransferTSDBResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferTSDBResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_TransferTSDBResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferTSDBResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("cortex.MatchType1.2", MatchType_name, MatchType_value) + proto.RegisterEnum("cortex.WriteRequest_SourceEnum1.2", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) + proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest1.2") + proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse1.2") + proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest1.2") + proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse1.2") + proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest1.2") + proto.RegisterType((*QueryResponse)(nil), "cortex.QueryResponse1.2") + proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse1.2") + proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest1.2") + proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse1.2") + proto.RegisterType((*LabelNamesRequest)(nil), "cortex.LabelNamesRequest1.2") + proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse1.2") + proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest1.2") + proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse1.2") + proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse1.2") + proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse1.2") + proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest1.2") + proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse1.2") + proto.RegisterType((*TimeSeriesChunk)(nil), "cortex.TimeSeriesChunk1.2") + proto.RegisterType((*Chunk)(nil), "cortex.Chunk1.2") + proto.RegisterType((*TransferChunksResponse)(nil), "cortex.TransferChunksResponse1.2") + proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries1.2") + 
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair1.2") + proto.RegisterType((*Sample)(nil), "cortex.Sample1.2") + proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers1.2") + proto.RegisterType((*Metric)(nil), "cortex.Metric1.2") + proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher1.2") + proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile1.2") + proto.RegisterType((*TransferTSDBResponse)(nil), "cortex.TransferTSDBResponse1.2") +} + +func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } + +var fileDescriptor_893a47d0a749d749 = []byte{ + // 1266 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xdf, 0x8d, 0x5f, 0x12, 0x3f, 0xde, 0xb8, 0xce, 0x24, 0x6d, 0xd3, 0xed, 0xff, 0xbf, 0x29, + 0x23, 0xb5, 0x44, 0x40, 0xdd, 0x92, 0xaa, 0xd0, 0x03, 0x55, 0xe5, 0xb4, 0x69, 0x6b, 0x94, 0xa4, + 0xe9, 0xd8, 0x05, 0x84, 0x84, 0xac, 0x8d, 0x3d, 0x49, 0x56, 0xec, 0x8b, 0xbb, 0x33, 0x8b, 0xe8, + 0x01, 0x09, 0x89, 0x0f, 0x00, 0x47, 0x3e, 0x02, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8f, 0x15, + 0x87, 0x8a, 0x3a, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xee, 0x5a, 0x44, 0x40, + 0x6f, 0x3b, 0xcf, 0xf3, 0x9b, 0xdf, 0x3c, 0xaf, 0x33, 0xcf, 0x82, 0x31, 0x08, 0x42, 0x4e, 0xbf, + 0x6e, 0x8d, 0xc2, 0x80, 0x07, 0xa8, 0x2a, 0x57, 0xe6, 0xe5, 0x43, 0x87, 0x1f, 0x45, 0xfb, 0xad, + 0x41, 0xe0, 0x5d, 0x39, 0x0c, 0x0e, 0x83, 0x2b, 0x42, 0xbd, 0x1f, 0x1d, 0x88, 0x95, 0x58, 0x88, + 0x2f, 0xb9, 0x0d, 0xff, 0xaa, 0x83, 0xf1, 0x69, 0xe8, 0x70, 0x4a, 0xe8, 0xe3, 0x88, 0x32, 0x8e, + 0x76, 0x01, 0xb8, 0xe3, 0x51, 0x46, 0x43, 0x87, 0xb2, 0x55, 0xfd, 0x42, 0x69, 0xbd, 0xbe, 0x81, + 0x5a, 0xea, 0xa8, 0x9e, 0xe3, 0xd1, 0xae, 0xd0, 0x6c, 0x9a, 0x4f, 0x5f, 0xac, 0x69, 0xbf, 0xbf, + 0x58, 0x43, 0x7b, 0x21, 0xb5, 0x5d, 0x37, 0x18, 0xf4, 0xd2, 0x5d, 0x24, 0xc3, 0x80, 0x3e, 0x84, + 0x6a, 0x37, 0x88, 0xc2, 0x01, 0x5d, 0x9d, 0xbb, 0xa0, 0xaf, 
0x37, 0x36, 0xd6, 0x12, 0xae, 0xec, + 0xa9, 0x2d, 0x09, 0xd9, 0xf2, 0x23, 0x8f, 0x54, 0x99, 0xf8, 0xc6, 0x6b, 0x00, 0x13, 0x29, 0x9a, + 0x87, 0x52, 0x7b, 0xaf, 0xd3, 0xd4, 0xd0, 0x02, 0x94, 0xc9, 0xa3, 0xed, 0xad, 0xa6, 0x8e, 0x4f, + 0xc1, 0xa2, 0xe2, 0x60, 0xa3, 0xc0, 0x67, 0x14, 0xdf, 0x84, 0x3a, 0xa1, 0xf6, 0x30, 0xf1, 0xa4, + 0x05, 0xf3, 0x8f, 0xa3, 0xac, 0x1b, 0x2b, 0xc9, 0xd1, 0x0f, 0x23, 0x1a, 0x3e, 0x51, 0x30, 0x92, + 0x80, 0xf0, 0x2d, 0x30, 0xe4, 0x76, 0x49, 0x87, 0xae, 0xc0, 0x7c, 0x48, 0x59, 0xe4, 0xf2, 0x64, + 0xff, 0xe9, 0xa9, 0xfd, 0x12, 0x47, 0x12, 0x14, 0xfe, 0x51, 0x07, 0x23, 0x4b, 0x8d, 0xde, 0x03, + 0xc4, 0xb8, 0x1d, 0xf2, 0xbe, 0x88, 0x07, 0xb7, 0xbd, 0x51, 0xdf, 0x8b, 0xc9, 0xf4, 0xf5, 0x12, + 0x69, 0x0a, 0x4d, 0x2f, 0x51, 0xec, 0x30, 0xb4, 0x0e, 0x4d, 0xea, 0x0f, 0xf3, 0xd8, 0x39, 0x81, + 0x6d, 0x50, 0x7f, 0x98, 0x45, 0x5e, 0x85, 0x05, 0xcf, 0xe6, 0x83, 0x23, 0x1a, 0xb2, 0xd5, 0x52, + 0xde, 0xb5, 0x6d, 0x7b, 0x9f, 0xba, 0x3b, 0x52, 0x49, 0x52, 0x14, 0xee, 0xc0, 0x62, 0xce, 0x68, + 0x74, 0xe3, 0x84, 0x69, 0x2e, 0xc7, 0x69, 0xce, 0x26, 0x14, 0xf7, 0x60, 0x59, 0x50, 0x75, 0x79, + 0x48, 0x6d, 0x2f, 0x25, 0xbc, 0x59, 0x40, 0x78, 0xf6, 0x75, 0xc2, 0xdb, 0x47, 0x91, 0xff, 0x65, + 0x01, 0xeb, 0x35, 0x40, 0xc2, 0xf4, 0x4f, 0x6c, 0x37, 0xa2, 0x2c, 0x09, 0xe0, 0xff, 0x01, 0xdc, + 0x58, 0xda, 0xf7, 0x6d, 0x8f, 0x8a, 0xc0, 0xd5, 0x48, 0x4d, 0x48, 0x76, 0x6d, 0x8f, 0xe2, 0x1b, + 0xb0, 0x9c, 0xdb, 0xa4, 0x4c, 0x79, 0x0b, 0x0c, 0xb9, 0xeb, 0x2b, 0x21, 0x17, 0xc6, 0xd4, 0x48, + 0xdd, 0x9d, 0x40, 0xf1, 0x32, 0x2c, 0x6d, 0x27, 0x34, 0xc9, 0x69, 0xf8, 0xba, 0xb2, 0x41, 0x09, + 0x15, 0xdb, 0x1a, 0xd4, 0x27, 0x36, 0x24, 0x64, 0x90, 0x1a, 0xc1, 0x30, 0x82, 0xe6, 0x23, 0x46, + 0xc3, 0x2e, 0xb7, 0x79, 0x4a, 0xf5, 0x8b, 0x0e, 0x4b, 0x19, 0xa1, 0xa2, 0xba, 0x08, 0x0d, 0xc7, + 0x3f, 0xa4, 0x8c, 0x3b, 0x81, 0xdf, 0x0f, 0x6d, 0x2e, 0x5d, 0xd2, 0xc9, 0x62, 0x2a, 0x25, 0x36, + 0xa7, 0xb1, 0xd7, 0x7e, 0xe4, 0xf5, 0x55, 0x28, 0xe3, 0x12, 0x28, 0x93, 0x9a, 0x1f, 0x79, 0x32, + 
0x82, 0x71, 0x55, 0xd9, 0x23, 0xa7, 0x3f, 0xc5, 0x54, 0x12, 0x4c, 0x4d, 0x7b, 0xe4, 0x74, 0x72, + 0x64, 0x2d, 0x58, 0x0e, 0x23, 0x97, 0x4e, 0xc3, 0xcb, 0x02, 0xbe, 0x14, 0xab, 0x72, 0x78, 0xfc, + 0x05, 0x2c, 0xc7, 0x86, 0x77, 0xee, 0xe4, 0x4d, 0x3f, 0x0b, 0xf3, 0x11, 0xa3, 0x61, 0xdf, 0x19, + 0xaa, 0x34, 0x54, 0xe3, 0x65, 0x67, 0x88, 0x2e, 0x43, 0x79, 0x68, 0x73, 0x5b, 0x98, 0x59, 0xdf, + 0x38, 0x97, 0x64, 0xfc, 0x35, 0xe7, 0x89, 0x80, 0xe1, 0x7b, 0x80, 0x62, 0x15, 0xcb, 0xb3, 0xbf, + 0x0f, 0x15, 0x16, 0x0b, 0x54, 0xdd, 0x9c, 0xcf, 0xb2, 0x4c, 0x59, 0x42, 0x24, 0x12, 0xff, 0xac, + 0x83, 0xb5, 0x43, 0x79, 0xe8, 0x0c, 0xd8, 0xdd, 0x20, 0xcc, 0x96, 0x3d, 0x7b, 0xd3, 0xed, 0x77, + 0x03, 0x8c, 0xa4, 0xb1, 0xfa, 0x8c, 0x72, 0xd5, 0x82, 0xa7, 0x8b, 0x5a, 0x90, 0x91, 0x7a, 0x02, + 0xed, 0x52, 0x8e, 0x3b, 0xb0, 0x36, 0xd3, 0x66, 0x15, 0x8a, 0x4b, 0x50, 0xf5, 0x04, 0x44, 0xc5, + 0xa2, 0x91, 0xd0, 0xca, 0x8d, 0x44, 0x69, 0xf1, 0x6f, 0x3a, 0x9c, 0x9a, 0x6a, 0xab, 0xd8, 0x85, + 0x83, 0x30, 0xf0, 0x54, 0xae, 0xb3, 0xd9, 0x6a, 0xc4, 0xf2, 0x8e, 0x12, 0x77, 0x86, 0xd9, 0x74, + 0xce, 0xe5, 0xd2, 0x79, 0x0b, 0xaa, 0xa2, 0xb4, 0x93, 0x8b, 0x65, 0x29, 0xe7, 0xd5, 0x9e, 0xed, + 0x84, 0x9b, 0x2b, 0xea, 0xe6, 0x37, 0x84, 0xa8, 0x3d, 0xb4, 0x47, 0x9c, 0x86, 0x44, 0x6d, 0x43, + 0xef, 0x42, 0x75, 0x10, 0x1b, 0xc3, 0x56, 0xcb, 0x82, 0x60, 0x31, 0x21, 0xc8, 0x76, 0xbe, 0x82, + 0xe0, 0xef, 0x75, 0xa8, 0x48, 0xd3, 0xdf, 0x54, 0xae, 0x4c, 0x58, 0xa0, 0xfe, 0x20, 0x18, 0x3a, + 0xfe, 0xa1, 0x68, 0x91, 0x0a, 0x49, 0xd7, 0x08, 0xa9, 0xd2, 0x8d, 0x7b, 0xc1, 0x50, 0xf5, 0xb9, + 0x0a, 0x67, 0x7a, 0xa1, 0xed, 0xb3, 0x03, 0x1a, 0x0a, 0xc3, 0xd2, 0xc4, 0xe0, 0x6f, 0x00, 0x26, + 0xf1, 0xce, 0xc4, 0x49, 0xff, 0x67, 0x71, 0x6a, 0xc1, 0x3c, 0xb3, 0xbd, 0x91, 0x2b, 0x3a, 0x3c, + 0x97, 0xe8, 0xae, 0x10, 0xab, 0x48, 0x25, 0x20, 0x7c, 0x1d, 0x6a, 0x29, 0x75, 0x6c, 0x79, 0x7a, + 0x23, 0x1a, 0x44, 0x7c, 0xa3, 0x15, 0xa8, 0x88, 0xfb, 0x4e, 0x04, 0xc2, 0x20, 0x72, 0x81, 0xdb, + 0x50, 0x95, 0x7c, 0x13, 0xbd, 0xbc, 
0x73, 0xe4, 0x22, 0xbe, 0x2b, 0x0b, 0xa2, 0x58, 0xe7, 0x93, + 0x10, 0xe2, 0x36, 0x2c, 0xe6, 0x4a, 0x35, 0xf7, 0xfc, 0xe8, 0x27, 0x7c, 0x7e, 0xaa, 0xb2, 0x7c, + 0xff, 0x75, 0xdc, 0x70, 0x1f, 0x8c, 0xec, 0x21, 0xe8, 0x22, 0x94, 0xf9, 0x93, 0x91, 0xf4, 0xaa, + 0x31, 0xa1, 0x13, 0xea, 0xde, 0x93, 0x11, 0x25, 0x42, 0x9d, 0x46, 0x4c, 0x56, 0xfb, 0x54, 0xc4, + 0x4a, 0x42, 0xa8, 0x22, 0xf6, 0x9d, 0x0e, 0x8d, 0x49, 0xa2, 0xef, 0x3a, 0x2e, 0xfd, 0x2f, 0xfa, + 0xca, 0x84, 0x85, 0x03, 0xc7, 0xa5, 0xc2, 0x06, 0x79, 0x5c, 0xba, 0x2e, 0xac, 0xc3, 0x33, 0xb0, + 0x92, 0xd4, 0x61, 0xaf, 0x7b, 0x67, 0x33, 0xa9, 0xc2, 0x77, 0x3e, 0x86, 0x5a, 0xea, 0x1a, 0xaa, + 0x41, 0x65, 0xeb, 0xe1, 0xa3, 0xf6, 0x76, 0x53, 0x43, 0x8b, 0x50, 0xdb, 0x7d, 0xd0, 0xeb, 0xcb, + 0xa5, 0x8e, 0x4e, 0x41, 0x9d, 0x6c, 0xdd, 0xdb, 0xfa, 0xac, 0xbf, 0xd3, 0xee, 0xdd, 0xbe, 0xdf, + 0x9c, 0x43, 0x08, 0x1a, 0x52, 0xb0, 0xfb, 0x40, 0xc9, 0x4a, 0x1b, 0xc7, 0x15, 0x58, 0x48, 0x6c, + 0x47, 0xd7, 0xa1, 0xbc, 0x17, 0xb1, 0x23, 0xb4, 0x52, 0x34, 0x9f, 0x99, 0xa7, 0xa7, 0xa4, 0xaa, + 0x27, 0x34, 0xf4, 0x01, 0x54, 0xc4, 0x34, 0x80, 0x0a, 0x87, 0x2b, 0xb3, 0x78, 0x64, 0xc2, 0x1a, + 0xba, 0x03, 0xf5, 0xcc, 0x14, 0x31, 0x63, 0xf7, 0xf9, 0x9c, 0x34, 0x3f, 0x70, 0x60, 0xed, 0xaa, + 0x8e, 0xee, 0x43, 0x3d, 0x33, 0x00, 0x20, 0x33, 0x57, 0x4c, 0xb9, 0x51, 0x62, 0xc2, 0x55, 0x30, + 0x31, 0x60, 0x0d, 0x6d, 0x01, 0x4c, 0xde, 0x7e, 0x74, 0x2e, 0x07, 0xce, 0x0e, 0x09, 0xa6, 0x59, + 0xa4, 0x4a, 0x69, 0x36, 0xa1, 0x96, 0xbe, 0x7c, 0x68, 0xb5, 0xe0, 0x31, 0x94, 0x24, 0xb3, 0x9f, + 0x49, 0xac, 0xa1, 0xbb, 0x60, 0xb4, 0x5d, 0xf7, 0x24, 0x34, 0x66, 0x56, 0xc3, 0xa6, 0x79, 0x5c, + 0x38, 0x3b, 0xe3, 0xb1, 0x41, 0x97, 0xf2, 0x8f, 0xca, 0xac, 0x17, 0xd4, 0x7c, 0xfb, 0x6f, 0x71, + 0xe9, 0x69, 0x3b, 0xd0, 0xc8, 0x5f, 0x9c, 0x68, 0xd6, 0xf4, 0x67, 0x5a, 0xa9, 0xa2, 0xf8, 0xa6, + 0xd5, 0xd6, 0xe3, 0xcc, 0x1a, 0xd9, 0xfa, 0x47, 0x67, 0x5e, 0x27, 0x8b, 0x5b, 0xd3, 0xfc, 0xdf, + 0x34, 0x57, 0xb6, 0x5b, 0x62, 0xa6, 0xcd, 0x8f, 0x9e, 0xbd, 0xb4, 0xb4, 
0xe7, 0x2f, 0x2d, 0xed, + 0xd5, 0x4b, 0x4b, 0xff, 0x76, 0x6c, 0xe9, 0x3f, 0x8d, 0x2d, 0xfd, 0xe9, 0xd8, 0xd2, 0x9f, 0x8d, + 0x2d, 0xfd, 0x8f, 0xb1, 0xa5, 0xff, 0x39, 0xb6, 0xb4, 0x57, 0x63, 0x4b, 0xff, 0xe1, 0xd8, 0xd2, + 0x9e, 0x1d, 0x5b, 0xda, 0xf3, 0x63, 0x4b, 0xfb, 0xbc, 0x3a, 0x70, 0x1d, 0xea, 0xf3, 0xfd, 0xaa, + 0xf8, 0x4d, 0xba, 0xf6, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x33, 0x55, 0x6d, 0x0d, + 0x00, 0x00, +} + +func (x MatchType) String() string { + s, ok := MatchType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x WriteRequest_SourceEnum) String() string { + s, ok := WriteRequest_SourceEnum_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *WriteRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WriteRequest) + if !ok { + that2, ok := that.(WriteRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(that1.Timeseries[i]) { + return false + } + } + if this.Source != that1.Source { + return false + } + return true +} +func (this *WriteResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WriteResponse) + if !ok { + that2, ok := that.(WriteResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *ReadRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ReadRequest) + if !ok { + that2, ok := that.(ReadRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + 
if len(this.Queries) != len(that1.Queries) { + return false + } + for i := range this.Queries { + if !this.Queries[i].Equal(that1.Queries[i]) { + return false + } + } + return true +} +func (this *ReadResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ReadResponse) + if !ok { + that2, ok := that.(ReadResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Results) != len(that1.Results) { + return false + } + for i := range this.Results { + if !this.Results[i].Equal(that1.Results[i]) { + return false + } + } + return true +} +func (this *QueryRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryRequest) + if !ok { + that2, ok := that.(QueryRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } + } + return true +} +func (this *QueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResponse) + if !ok { + that2, ok := that.(QueryResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { + return false + } + } + return true +} +func (this *QueryStreamResponse) Equal(that interface{}) bool { + if that == nil { + 
return this == nil + } + + that1, ok := that.(*QueryStreamResponse) + if !ok { + that2, ok := that.(QueryStreamResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { + return false + } + } + return true +} +func (this *LabelValuesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelValuesRequest) + if !ok { + that2, ok := that.(LabelValuesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.LabelName != that1.LabelName { + return false + } + return true +} +func (this *LabelValuesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelValuesResponse) + if !ok { + that2, ok := that.(LabelValuesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.LabelValues) != len(that1.LabelValues) { + return false + } + for i := range this.LabelValues { + if this.LabelValues[i] != that1.LabelValues[i] { + return false + } + } + return true +} +func (this *LabelNamesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelNamesRequest) + if !ok { + that2, ok := that.(LabelNamesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *LabelNamesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelNamesResponse) + if !ok { + that2, ok := 
that.(LabelNamesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.LabelNames) != len(that1.LabelNames) { + return false + } + for i := range this.LabelNames { + if this.LabelNames[i] != that1.LabelNames[i] { + return false + } + } + return true +} +func (this *UserStatsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserStatsRequest) + if !ok { + that2, ok := that.(UserStatsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *UserStatsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserStatsResponse) + if !ok { + that2, ok := that.(UserStatsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.IngestionRate != that1.IngestionRate { + return false + } + if this.NumSeries != that1.NumSeries { + return false + } + if this.ApiIngestionRate != that1.ApiIngestionRate { + return false + } + if this.RuleIngestionRate != that1.RuleIngestionRate { + return false + } + return true +} +func (this *UserIDStatsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserIDStatsResponse) + if !ok { + that2, ok := that.(UserIDStatsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.UserId != that1.UserId { + return false + } + if !this.Data.Equal(that1.Data) { + return false + } + return true +} +func (this *UsersStatsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := 
that.(*UsersStatsResponse) + if !ok { + that2, ok := that.(UsersStatsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Stats) != len(that1.Stats) { + return false + } + for i := range this.Stats { + if !this.Stats[i].Equal(that1.Stats[i]) { + return false + } + } + return true +} +func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricsForLabelMatchersRequest) + if !ok { + that2, ok := that.(MetricsForLabelMatchersRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if len(this.MatchersSet) != len(that1.MatchersSet) { + return false + } + for i := range this.MatchersSet { + if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { + return false + } + } + return true +} +func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricsForLabelMatchersResponse) + if !ok { + that2, ok := that.(MetricsForLabelMatchersResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Metric) != len(that1.Metric) { + return false + } + for i := range this.Metric { + if !this.Metric[i].Equal(that1.Metric[i]) { + return false + } + } + return true +} +func (this *TimeSeriesChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesChunk) + if !ok { + that2, ok := that.(TimeSeriesChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this 
== nil + } else if this == nil { + return false + } + if this.FromIngesterId != that1.FromIngesterId { + return false + } + if this.UserId != that1.UserId { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Chunks) != len(that1.Chunks) { + return false + } + for i := range this.Chunks { + if !this.Chunks[i].Equal(&that1.Chunks[i]) { + return false + } + } + return true +} +func (this *Chunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Chunk) + if !ok { + that2, ok := that.(Chunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if this.Encoding != that1.Encoding { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *TransferChunksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TransferChunksResponse) + if !ok { + that2, ok := that.(TransferChunksResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *TimeSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeries) + if !ok { + that2, ok := that.(TimeSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if 
len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { + return false + } + } + return true +} +func (this *LabelPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelPair) + if !ok { + that2, ok := that.(LabelPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Name, that1.Name) { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *Sample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sample) + if !ok { + that2, ok := that.(Sample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if this.TimestampMs != that1.TimestampMs { + return false + } + return true +} +func (this *LabelMatchers) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelMatchers) + if !ok { + that2, ok := that.(LabelMatchers) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } + } + return true +} +func (this *Metric) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Metric) + if !ok { + that2, ok := that.(Metric) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false 
+ } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + return true +} +func (this *LabelMatcher) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelMatcher) + if !ok { + that2, ok := that.(LabelMatcher) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *TimeSeriesFile) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesFile) + if !ok { + that2, ok := that.(TimeSeriesFile) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FromIngesterId != that1.FromIngesterId { + return false + } + if this.UserId != that1.UserId { + return false + } + if this.Filename != that1.Filename { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *TransferTSDBResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TransferTSDBResponse) + if !ok { + that2, ok := that.(TransferTSDBResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *WriteRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.WriteRequest{") + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") + s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *WriteResponse) GoString() string { + if 
this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.WriteResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ReadRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.ReadRequest{") + if this.Queries != nil { + s = append(s, "Queries: "+fmt.Sprintf("%#v", this.Queries)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ReadResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.ReadResponse{") + if this.Results != nil { + s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.QueryRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.QueryResponse{") + if this.Timeseries != nil { + vs := make([]*TimeSeries, len(this.Timeseries)) + for i := range vs { + vs[i] = &this.Timeseries[i] + } + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryStreamResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.QueryStreamResponse{") + if this.Timeseries != nil { + vs := make([]*TimeSeriesChunk, len(this.Timeseries)) + for i := range vs { + vs[i] = &this.Timeseries[i] + } + s = append(s, "Timeseries: 
"+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelValuesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelValuesRequest{") + s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelValuesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelValuesResponse{") + s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelNamesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.LabelNamesRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelNamesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelNamesResponse{") + s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UserStatsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.UserStatsRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UserStatsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.UserStatsResponse{") + s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") + s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") + s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") + s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UserIDStatsResponse) GoString() string { 
+ if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.UserIDStatsResponse{") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UsersStatsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.UsersStatsResponse{") + if this.Stats != nil { + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MetricsForLabelMatchersRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.MetricsForLabelMatchersRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.MatchersSet != nil { + s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MetricsForLabelMatchersResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.MetricsForLabelMatchersResponse{") + if this.Metric != nil { + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeriesChunk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.TimeSeriesChunk{") + s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Chunks != nil { + vs := make([]*Chunk, len(this.Chunks)) + for i := range vs { + vs[i] = &this.Chunks[i] + } + s = 
append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Chunk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.Chunk{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TransferChunksResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.TransferChunksResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeries) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.TimeSeries{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Samples != nil { + vs := make([]*Sample, len(this.Samples)) + for i := range vs { + vs[i] = &this.Samples[i] + } + s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelPair) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.LabelPair{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Sample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.Sample{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelMatchers) GoString() string { + if 
this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelMatchers{") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Metric) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.Metric{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelMatcher) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.LabelMatcher{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeriesFile) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.TimeSeriesFile{") + s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TransferTSDBResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.TransferTSDBResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCortex(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IngesterClient is the client API for Ingester service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IngesterClient interface { + Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) + QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) + LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) + LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) + UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) + AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) + MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) + // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
+ TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) + // TransferTSDB transfers all files of a tsdb to a joining ingester + TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) +} + +type ingesterClient struct { + cc *grpc.ClientConn +} + +func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { + return &ingesterClient{cc} +} + +func (c *ingesterClient) Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { + out := new(WriteResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { + out := new(QueryResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) 
+ if err != nil { + return nil, err + } + x := &ingesterQueryStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Ingester_QueryStreamClient interface { + Recv() (*QueryStreamResponse, error) + grpc.ClientStream +} + +type ingesterQueryStreamClient struct { + grpc.ClientStream +} + +func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { + m := new(QueryStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { + out := new(LabelValuesResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { + out := new(LabelNamesResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { + out := new(UserStatsResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { + out := new(UsersStatsResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { + out := new(MetricsForLabelMatchersResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/TransferChunks", opts...) + if err != nil { + return nil, err + } + x := &ingesterTransferChunksClient{stream} + return x, nil +} + +type Ingester_TransferChunksClient interface { + Send(*TimeSeriesChunk) error + CloseAndRecv() (*TransferChunksResponse, error) + grpc.ClientStream +} + +type ingesterTransferChunksClient struct { + grpc.ClientStream +} + +func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(TransferChunksResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingesterClient) TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/TransferTSDB", opts...) 
+ if err != nil { + return nil, err + } + x := &ingesterTransferTSDBClient{stream} + return x, nil +} + +type Ingester_TransferTSDBClient interface { + Send(*TimeSeriesFile) error + CloseAndRecv() (*TransferTSDBResponse, error) + grpc.ClientStream +} + +type ingesterTransferTSDBClient struct { + grpc.ClientStream +} + +func (x *ingesterTransferTSDBClient) Send(m *TimeSeriesFile) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingesterTransferTSDBClient) CloseAndRecv() (*TransferTSDBResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(TransferTSDBResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// IngesterServer is the server API for Ingester service. +type IngesterServer interface { + Push(context.Context, *WriteRequest) (*WriteResponse, error) + Query(context.Context, *QueryRequest) (*QueryResponse, error) + QueryStream(*QueryRequest, Ingester_QueryStreamServer) error + LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) + LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) + UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) + AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) + MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) + // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
+ TransferChunks(Ingester_TransferChunksServer) error + // TransferTSDB transfers all files of a tsdb to a joining ingester + TransferTSDB(Ingester_TransferTSDBServer) error +} + +func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { + s.RegisterService(&_Ingester_serviceDesc, srv) +} + +func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).Push(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/Push", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).Push(ctx, req.(*WriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).Query(ctx, req.(*QueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(QueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) +} + +type Ingester_QueryStreamServer interface { + Send(*QueryStreamResponse) error + grpc.ServerStream +} + +type ingesterQueryStreamServer struct { + grpc.ServerStream +} + +func (x *ingesterQueryStreamServer) Send(m 
*QueryStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelValuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).LabelValues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/LabelValues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).LabelNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/LabelNames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).LabelNames(ctx, req.(*LabelNamesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UserStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).UserStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/UserStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UserStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).AllUserStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/AllUserStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsForLabelMatchersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) +} + +type Ingester_TransferChunksServer interface { + SendAndClose(*TransferChunksResponse) error + Recv() (*TimeSeriesChunk, error) + grpc.ServerStream +} + +type ingesterTransferChunksServer struct { + grpc.ServerStream +} + +func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { + m := new(TimeSeriesChunk) + if err := 
x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Ingester_TransferTSDB_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngesterServer).TransferTSDB(&ingesterTransferTSDBServer{stream}) +} + +type Ingester_TransferTSDBServer interface { + SendAndClose(*TransferTSDBResponse) error + Recv() (*TimeSeriesFile, error) + grpc.ServerStream +} + +type ingesterTransferTSDBServer struct { + grpc.ServerStream +} + +func (x *ingesterTransferTSDBServer) SendAndClose(m *TransferTSDBResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingesterTransferTSDBServer) Recv() (*TimeSeriesFile, error) { + m := new(TimeSeriesFile) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Ingester_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cortex.Ingester", + HandlerType: (*IngesterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Push", + Handler: _Ingester_Push_Handler, + }, + { + MethodName: "Query", + Handler: _Ingester_Query_Handler, + }, + { + MethodName: "LabelValues", + Handler: _Ingester_LabelValues_Handler, + }, + { + MethodName: "LabelNames", + Handler: _Ingester_LabelNames_Handler, + }, + { + MethodName: "UserStats", + Handler: _Ingester_UserStats_Handler, + }, + { + MethodName: "AllUserStats", + Handler: _Ingester_AllUserStats_Handler, + }, + { + MethodName: "MetricsForLabelMatchers", + Handler: _Ingester_MetricsForLabelMatchers_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "QueryStream", + Handler: _Ingester_QueryStream_Handler, + ServerStreams: true, + }, + { + StreamName: "TransferChunks", + Handler: _Ingester_TransferChunks_Handler, + ClientStreams: true, + }, + { + StreamName: "TransferTSDB", + Handler: _Ingester_TransferTSDB_Handler, + ClientStreams: true, + }, + }, + Metadata: "cortex.proto", +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, msg := range m.Timeseries { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Source != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.Source)) + } + return i, nil +} + +func (m *WriteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Queries) > 0 { + for _, msg := range m.Queries { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Results) > 0 { + for _, msg := range m.Results { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*QueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTimestampMs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, msg := range m.Matchers { + dAtA[i] = 0x1a + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *QueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, msg := range m.Timeseries { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, msg := range m.Timeseries { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.LabelName) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelName))) + i += copy(dAtA[i:], m.LabelName) + } + return i, nil +} + +func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.LabelValues) > 0 { + for _, s := range m.LabelValues { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.IngestionRate != 0 { + dAtA[i] = 0x9 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) + i += 8 + } + if m.NumSeries != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.NumSeries)) + } + if m.ApiIngestionRate != 0 { + dAtA[i] = 0x19 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) + i += 8 + } + if m.RuleIngestionRate != 0 { + dAtA[i] = 0x21 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) + i += 8 + } + return i, nil +} + +func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UserId) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i += copy(dAtA[i:], m.UserId) + } + if m.Data != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.Data.Size())) + n1, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Stats) > 0 { + for _, msg := range m.Stats { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTimestampMs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + } + if len(m.MatchersSet) > 0 { + for _, msg := range m.MatchersSet { + dAtA[i] = 0x1a + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Metric) > 0 { + for _, msg := range m.Metric { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FromIngesterId) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i += copy(dAtA[i:], m.FromIngesterId) + } + if len(m.UserId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i += copy(dAtA[i:], m.UserId) + } + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0x1a + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Chunks) > 0 { + for _, msg := range m.Chunks { + dAtA[i] = 0x22 + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTimestampMs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + } + if m.Encoding != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.Encoding)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Samples) > 0 { + for _, msg := range m.Samples { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x9 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 + } + if m.TimestampMs != 0 { + dAtA[i] = 0x10 
+ i++ + i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs)) + } + return i, nil +} + +func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Matchers) > 0 { + for _, msg := range m.Matchers { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCortex(dAtA, i, uint64(m.Type)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FromIngesterId) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i += copy(dAtA[i:], m.FromIngesterId) + } + if len(m.UserId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i += copy(dAtA[i:], m.UserId) + } + if len(m.Filename) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Filename))) + i += copy(dAtA[i:], m.Filename) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferTSDBResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if m.Source != 0 { + n += 1 + sovCortex(uint64(m.Source)) + } + return n +} + +func (m *WriteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ReadRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m 
*ReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *QueryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *QueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *QueryStreamResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelValuesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LabelName) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *LabelValuesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelValues) > 0 { + for _, s := range m.LabelValues { + l = len(s) + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelNamesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *LabelNamesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + l = len(s) + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *UserStatsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *UserStatsResponse) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IngestionRate != 0 { + n += 9 + } + if m.NumSeries != 0 { + n += 1 + sovCortex(uint64(m.NumSeries)) + } + if m.ApiIngestionRate != 0 { + n += 9 + } + if m.RuleIngestionRate != 0 { + n += 9 + } + return n +} + +func (m *UserIDStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *UsersStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *MetricsForLabelMatchersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } + if len(m.MatchersSet) > 0 { + for _, e := range m.MatchersSet { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *MetricsForLabelMatchersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *TimeSeriesChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromIngesterId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + 
var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } + if m.Encoding != 0 { + n += 1 + sovCortex(uint64(m.Encoding)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *TransferChunksResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.TimestampMs != 0 { + n += 1 + sovCortex(uint64(m.TimestampMs)) + } + return n +} + +func (m *LabelMatchers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCortex(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return 
n +} + +func (m *TimeSeriesFile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromIngesterId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Filename) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *TransferTSDBResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovCortex(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozCortex(x uint64) (n int) { + return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *WriteRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteRequest{`, + `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *WriteResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteResponse{`, + `}`, + }, "") + return s +} +func (this *ReadRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReadRequest{`, + `Queries:` + strings.Replace(fmt.Sprintf("%v", this.Queries), "QueryRequest", "QueryRequest", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReadResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReadResponse{`, + `Results:` + strings.Replace(fmt.Sprintf("%v", this.Results), "QueryResponse", "QueryResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *QueryRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + 
strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, + `}`, + }, "") + return s +} +func (this *QueryResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryResponse{`, + `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *QueryStreamResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryStreamResponse{`, + `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValuesRequest{`, + `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValuesResponse{`, + `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, + `}`, + }, "") + return s +} +func (this *LabelNamesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelNamesRequest{`, + `}`, + }, "") + return s +} +func (this *LabelNamesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelNamesResponse{`, + `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, + `}`, + }, "") + return s +} +func (this *UserStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserStatsRequest{`, + `}`, + }, "") + return s +} +func (this *UserStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserStatsResponse{`, + `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, + `NumSeries:` 
+ fmt.Sprintf("%v", this.NumSeries) + `,`, + `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, + `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, + `}`, + }, "") + return s +} +func (this *UserIDStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserIDStatsResponse{`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "UserStatsResponse", "UserStatsResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UsersStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UsersStatsResponse{`, + `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "UserIDStatsResponse", "UserIDStatsResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricsForLabelMatchersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `MatchersSet:` + strings.Replace(fmt.Sprintf("%v", this.MatchersSet), "LabelMatchers", "LabelMatchers", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricsForLabelMatchersResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, + `Metric:` + strings.Replace(fmt.Sprintf("%v", this.Metric), "Metric", "Metric", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TimeSeriesChunk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeSeriesChunk{`, + `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1), `&`, 
``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Chunk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Chunk{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Encoding:` + fmt.Sprintf("%v", this.Encoding) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *TransferChunksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransferChunksResponse{`, + `}`, + }, "") + return s +} +func (this *TimeSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeSeries{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelPair) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelPair{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Sample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sample{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `}`, + }, "") + return s +} +func (this *LabelMatchers) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelMatchers{`, + `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Metric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metric{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} +func (this *LabelMatcher) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&LabelMatcher{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TimeSeriesFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeSeriesFile{`, + `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *TransferTSDBResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransferTSDBResponse{`, + `}`, + }, "") + return s +} +func valueToStringCortex(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, PreallocTimeseries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + m.Source = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &QueryRequest{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResponse{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := 
m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeriesChunk{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.IngestionRate = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) + } + m.NumSeries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumSeries |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ApiIngestionRate = float64(math.Float64frombits(v)) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.RuleIngestionRate = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == 
nil { + m.Data = &UserStatsResponse{} + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &UserIDStatsResponse{}) + if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) + if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, &Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) + } + m.Encoding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Encoding |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatchers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, LabelAdapter{}) + if err := 
m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MatchType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferTSDBResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferTSDBResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferTSDBResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 
0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCortex(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCortex + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthCortex + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCortex(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthCortex + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCortex = fmt.Errorf("proto: integer overflow") +) diff --git a/integration-tests/proto/1.2.1/timeseries.go b/integration-tests/proto/1.2.1/timeseries.go new file mode 100644 index 0000000000..204382952b --- /dev/null +++ b/integration-tests/proto/1.2.1/timeseries.go @@ -0,0 +1,256 @@ +package client + +import ( + "flag" + "fmt" + "io" + "strings" + "sync" + "unsafe" + + "github.com/prometheus/prometheus/pkg/labels" +) + +var ( + expectedTimeseries = 100 + expectedLabels = 20 + expectedSamplesPerSeries = 10 + + slicePool = sync.Pool{ + New: func() interface{} { + return make([]PreallocTimeseries, 0, expectedTimeseries) + }, + } + + timeSeriesPool = sync.Pool{ + New: func() interface{} { + return &TimeSeries{ + Labels: make([]LabelAdapter, 0, expectedLabels), + Samples: make([]Sample, 0, expectedSamplesPerSeries), + } + }, + } +) + +// PreallocConfig configures how structures will be preallocated to optimise +// proto unmarshalling. +type PreallocConfig struct{} + +// RegisterFlags registers configuration settings. +func (PreallocConfig) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, use for preallocations.") + f.IntVar(&expectedLabels, "ingester-client.expected-labels", expectedLabels, "Expected number of labels per timeseries, used for preallocations.") + f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.") +} + +// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal. +type PreallocWriteRequest struct { + WriteRequest +} + +// Unmarshal implements proto.Message. 
+func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error { + p.Timeseries = slicePool.Get().([]PreallocTimeseries) + return p.WriteRequest.Unmarshal(dAtA) +} + +// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal. +type PreallocTimeseries struct { + *TimeSeries +} + +// Unmarshal implements proto.Message. +func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { + p.TimeSeries = timeSeriesPool.Get().(*TimeSeries) + return p.TimeSeries.Unmarshal(dAtA) +} + +// LabelAdapter is a labels.Label that can be marshalled to/from protos. +type LabelAdapter labels.Label + +// Marshal implements proto.Marshaller. +func (bs *LabelAdapter) Marshal() ([]byte, error) { + buf := make([]byte, bs.Size()) + _, err := bs.MarshalTo(buf) + return buf, err +} + +// MarshalTo implements proto.Marshaller. +func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) { + var i int + ls := (*labels.Label)(bs) + + buf[i] = 0xa + i++ + i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) + i += copy(buf[i:], ls.Name) + + buf[i] = 0x12 + i++ + i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) + i += copy(buf[i:], ls.Value) + + return i, nil +} + +// Unmarshal a LabelAdapater, implements proto.Unmarshaller. +// NB this is a copy of the autogenerated code to unmarshal a LabelPair, +// with the byte copying replaced with a yoloString. 
+func (bs *LabelAdapter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + bs.Name = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + bs.Value = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func yoloString(buf []byte) string { + return *((*string)(unsafe.Pointer(&buf))) +} + +// Size implements proto.Sizer. +func (bs *LabelAdapter) Size() int { + ls := (*labels.Label)(bs) + var n int + l := len(ls.Name) + n += 1 + l + sovCortex(uint64(l)) + l = len(ls.Value) + n += 1 + l + sovCortex(uint64(l)) + return n +} + +// Equal implements proto.Equaler. +func (bs *LabelAdapter) Equal(other LabelAdapter) bool { + return bs.Name == other.Name && bs.Value == other.Value +} + +// Compare implements proto.Comparer. +func (bs *LabelAdapter) Compare(other LabelAdapter) int { + if c := strings.Compare(bs.Name, other.Name); c != 0 { + return c + } + return strings.Compare(bs.Value, other.Value) +} + +// ReuseSlice puts the slice back into a sync.Pool for reuse. +func ReuseSlice(slice []PreallocTimeseries) { + for i := range slice { + ReuseTimeseries(slice[i].TimeSeries) + } + slicePool.Put(slice[:0]) +} + +// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. +func ReuseTimeseries(ts *TimeSeries) { + ts.Labels = ts.Labels[:0] + ts.Samples = ts.Samples[:0] + timeSeriesPool.Put(ts) +} diff --git a/integration-tests/proto/1.3.0/cortex.pb.go b/integration-tests/proto/1.3.0/cortex.pb.go new file mode 100644 index 0000000000..4bd6772008 --- /dev/null +++ b/integration-tests/proto/1.3.0/cortex.pb.go @@ -0,0 +1,7967 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: cortex.proto + +package client + +import ( + bytes "bytes" + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MatchType int32 + +const ( + EQUAL MatchType = 0 + NOT_EQUAL MatchType = 1 + REGEX_MATCH MatchType = 2 + REGEX_NO_MATCH MatchType = 3 +) + +var MatchType_name = map[int32]string{ + 0: "EQUAL", + 1: "NOT_EQUAL", + 2: "REGEX_MATCH", + 3: "REGEX_NO_MATCH", +} + +var MatchType_value = map[string]int32{ + "EQUAL": 0, + "NOT_EQUAL": 1, + "REGEX_MATCH": 2, + "REGEX_NO_MATCH": 3, +} + +func (MatchType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0} +} + +type WriteRequest_SourceEnum int32 + +const ( + API WriteRequest_SourceEnum = 0 + RULE WriteRequest_SourceEnum = 1 +) + +var WriteRequest_SourceEnum_name = map[int32]string{ + 0: "API", + 1: "RULE", +} + +var WriteRequest_SourceEnum_value = map[string]int32{ + "API": 0, + "RULE": 1, +} + +func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0, 0} +} + +type WriteRequest struct { + Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` + Source 
WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0} +} +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { + if m != nil { + return m.Source + } + return API +} + +type WriteResponse struct { +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{1} +} +func (m *WriteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponse.Merge(m, src) +} +func (m *WriteResponse) XXX_Size() int { + return m.Size() +} +func (m 
*WriteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteResponse proto.InternalMessageInfo + +type ReadRequest struct { + Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{2} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetQueries() []*QueryRequest { + if m != nil { + return m.Queries + } + return nil +} + +type ReadResponse struct { + Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{3} +} +func (m *ReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil + } +} +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetResults() []*QueryResponse { + if m != nil { + return m.Results + } + return nil +} + +type QueryRequest struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (m *QueryRequest) Reset() { *m = QueryRequest{} } +func (*QueryRequest) ProtoMessage() {} +func (*QueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{4} +} +func (m *QueryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRequest.Merge(m, src) +} +func (m *QueryRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRequest proto.InternalMessageInfo + +func (m *QueryRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *QueryRequest) GetEndTimestampMs() int64 { + if m != nil { + return 
m.EndTimestampMs + } + return 0 +} + +func (m *QueryRequest) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +type QueryResponse struct { + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` +} + +func (m *QueryResponse) Reset() { *m = QueryResponse{} } +func (*QueryResponse) ProtoMessage() {} +func (*QueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{5} +} +func (m *QueryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResponse.Merge(m, src) +} +func (m *QueryResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResponse proto.InternalMessageInfo + +func (m *QueryResponse) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// QueryStreamResponse contains a batch of timeseries chunks. 
+type QueryStreamResponse struct { + Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` +} + +func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } +func (*QueryStreamResponse) ProtoMessage() {} +func (*QueryStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{6} +} +func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStreamResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStreamResponse.Merge(m, src) +} +func (m *QueryStreamResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStreamResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStreamResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStreamResponse proto.InternalMessageInfo + +func (m *QueryStreamResponse) GetTimeseries() []TimeSeriesChunk { + if m != nil { + return m.Timeseries + } + return nil +} + +type LabelValuesRequest struct { + LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` +} + +func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } +func (*LabelValuesRequest) ProtoMessage() {} +func (*LabelValuesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{7} +} +func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err 
!= nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesRequest.Merge(m, src) +} +func (m *LabelValuesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo + +func (m *LabelValuesRequest) GetLabelName() string { + if m != nil { + return m.LabelName + } + return "" +} + +type LabelValuesResponse struct { + LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` +} + +func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } +func (*LabelValuesResponse) ProtoMessage() {} +func (*LabelValuesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{8} +} +func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesResponse.Merge(m, src) +} +func (m *LabelValuesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo + +func (m *LabelValuesResponse) GetLabelValues() []string { + if m != nil { + return m.LabelValues + } + return nil +} + +type LabelNamesRequest struct { +} + +func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } +func (*LabelNamesRequest) ProtoMessage() {} +func (*LabelNamesRequest) 
Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{9} +} +func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesRequest.Merge(m, src) +} +func (m *LabelNamesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo + +type LabelNamesResponse struct { + LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` +} + +func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } +func (*LabelNamesResponse) ProtoMessage() {} +func (*LabelNamesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{10} +} +func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesResponse.Merge(m, src) +} +func (m *LabelNamesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo + 
+func (m *LabelNamesResponse) GetLabelNames() []string { + if m != nil { + return m.LabelNames + } + return nil +} + +type UserStatsRequest struct { +} + +func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } +func (*UserStatsRequest) ProtoMessage() {} +func (*UserStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{11} +} +func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserStatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserStatsRequest.Merge(m, src) +} +func (m *UserStatsRequest) XXX_Size() int { + return m.Size() +} +func (m *UserStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UserStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UserStatsRequest proto.InternalMessageInfo + +type UserStatsResponse struct { + IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate,proto3" json:"ingestion_rate,omitempty"` + NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries,proto3" json:"num_series,omitempty"` + ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` + RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` +} + +func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } +func (*UserStatsResponse) ProtoMessage() {} +func (*UserStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{12} +} +func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserStatsResponse.Merge(m, src) +} +func (m *UserStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *UserStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UserStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UserStatsResponse proto.InternalMessageInfo + +func (m *UserStatsResponse) GetIngestionRate() float64 { + if m != nil { + return m.IngestionRate + } + return 0 +} + +func (m *UserStatsResponse) GetNumSeries() uint64 { + if m != nil { + return m.NumSeries + } + return 0 +} + +func (m *UserStatsResponse) GetApiIngestionRate() float64 { + if m != nil { + return m.ApiIngestionRate + } + return 0 +} + +func (m *UserStatsResponse) GetRuleIngestionRate() float64 { + if m != nil { + return m.RuleIngestionRate + } + return 0 +} + +type UserIDStatsResponse struct { + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } +func (*UserIDStatsResponse) ProtoMessage() {} +func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{13} +} +func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserIDStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil + } +} +func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserIDStatsResponse.Merge(m, src) +} +func (m *UserIDStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *UserIDStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UserIDStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UserIDStatsResponse proto.InternalMessageInfo + +func (m *UserIDStatsResponse) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *UserIDStatsResponse) GetData() *UserStatsResponse { + if m != nil { + return m.Data + } + return nil +} + +type UsersStatsResponse struct { + Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` +} + +func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } +func (*UsersStatsResponse) ProtoMessage() {} +func (*UsersStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{14} +} +func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UsersStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UsersStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsersStatsResponse.Merge(m, src) +} +func (m *UsersStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *UsersStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UsersStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UsersStatsResponse proto.InternalMessageInfo + +func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse { + if m != nil { + return m.Stats + } + return nil +} + +type MetricsForLabelMatchersRequest struct { + StartTimestampMs int64 
`protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` +} + +func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } +func (*MetricsForLabelMatchersRequest) ProtoMessage() {} +func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{15} +} +func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricsForLabelMatchersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src) +} +func (m *MetricsForLabelMatchersRequest) XXX_Size() int { + return m.Size() +} +func (m *MetricsForLabelMatchersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsForLabelMatchersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsForLabelMatchersRequest proto.InternalMessageInfo + +func (m *MetricsForLabelMatchersRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *MetricsForLabelMatchersRequest) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { + if m != nil { + return m.MatchersSet + } + return nil +} + +type MetricsForLabelMatchersResponse struct { + Metric 
[]*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` +} + +func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} } +func (*MetricsForLabelMatchersResponse) ProtoMessage() {} +func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{16} +} +func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricsForLabelMatchersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src) +} +func (m *MetricsForLabelMatchersResponse) XXX_Size() int { + return m.Size() +} +func (m *MetricsForLabelMatchersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsForLabelMatchersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsForLabelMatchersResponse proto.InternalMessageInfo + +func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +type TimeSeriesChunk struct { + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Labels []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` + Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` +} + +func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } +func (*TimeSeriesChunk) ProtoMessage() {} +func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { + return 
fileDescriptor_893a47d0a749d749, []int{17} +} +func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesChunk.Merge(m, src) +} +func (m *TimeSeriesChunk) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesChunk) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo + +func (m *TimeSeriesChunk) GetFromIngesterId() string { + if m != nil { + return m.FromIngesterId + } + return "" +} + +func (m *TimeSeriesChunk) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *TimeSeriesChunk) GetChunks() []Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +type Chunk struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Encoding int32 `protobuf:"varint,3,opt,name=encoding,proto3" json:"encoding,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{18} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } 
else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +func (m *Chunk) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *Chunk) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *Chunk) GetEncoding() int32 { + if m != nil { + return m.Encoding + } + return 0 +} + +func (m *Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type TransferChunksResponse struct { +} + +func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } +func (*TransferChunksResponse) ProtoMessage() {} +func (*TransferChunksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{19} +} +func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferChunksResponse.Merge(m, src) +} +func (m *TransferChunksResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferChunksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo + +type TimeSeries struct { + Labels []LabelAdapter 
`protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` + // Sorted by time, oldest sample first. + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{20} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +type LabelPair struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{21} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LabelPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type Sample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{22} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type LabelMatchers struct { + Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } +func (*LabelMatchers) ProtoMessage() {} +func (*LabelMatchers) Descriptor() 
([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{23} +} +func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatchers) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatchers.Merge(m, src) +} +func (m *LabelMatchers) XXX_Size() int { + return m.Size() +} +func (m *LabelMatchers) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatchers.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatchers proto.InternalMessageInfo + +func (m *LabelMatchers) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +type Metric struct { + Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{24} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +type LabelMatcher struct { + Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" 
json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{25} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(m, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo + +func (m *LabelMatcher) GetType() MatchType { + if m != nil { + return m.Type + } + return EQUAL +} + +func (m *LabelMatcher) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelMatcher) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type TimeSeriesFile struct { + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } +func (*TimeSeriesFile) ProtoMessage() {} +func (*TimeSeriesFile) Descriptor() ([]byte, []int) { + 
return fileDescriptor_893a47d0a749d749, []int{26} +} +func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesFile.Merge(m, src) +} +func (m *TimeSeriesFile) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesFile) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesFile.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesFile proto.InternalMessageInfo + +func (m *TimeSeriesFile) GetFromIngesterId() string { + if m != nil { + return m.FromIngesterId + } + return "" +} + +func (m *TimeSeriesFile) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *TimeSeriesFile) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *TimeSeriesFile) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type TransferTSDBResponse struct { +} + +func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } +func (*TransferTSDBResponse) ProtoMessage() {} +func (*TransferTSDBResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{27} +} +func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferTSDBResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferTSDBResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransferTSDBResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_TransferTSDBResponse.Merge(m, src) +} +func (m *TransferTSDBResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferTSDBResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TransferTSDBResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferTSDBResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) + proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) + proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest") + proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse") + proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse") + proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest") + proto.RegisterType((*QueryResponse)(nil), "cortex.QueryResponse") + proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse") + proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest") + proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse") + proto.RegisterType((*LabelNamesRequest)(nil), "cortex.LabelNamesRequest") + proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse") + proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest") + proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse") + proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse") + proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse") + proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest") + proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse") + proto.RegisterType((*TimeSeriesChunk)(nil), "cortex.TimeSeriesChunk") + proto.RegisterType((*Chunk)(nil), "cortex.Chunk") + proto.RegisterType((*TransferChunksResponse)(nil), 
"cortex.TransferChunksResponse") + proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries") + proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair") + proto.RegisterType((*Sample)(nil), "cortex.Sample") + proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") + proto.RegisterType((*Metric)(nil), "cortex.Metric") + proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") + proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile") + proto.RegisterType((*TransferTSDBResponse)(nil), "cortex.TransferTSDBResponse") +} + +func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } + +var fileDescriptor_893a47d0a749d749 = []byte{ + // 1266 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xdf, 0x8d, 0x5f, 0x12, 0x3f, 0xde, 0xb8, 0xce, 0x24, 0x6d, 0xd3, 0xed, 0xff, 0xbf, 0x29, + 0x23, 0xb5, 0x44, 0x40, 0xdd, 0x92, 0xaa, 0xd0, 0x03, 0x55, 0xe5, 0xb4, 0x69, 0x6b, 0x94, 0xa4, + 0xe9, 0xd8, 0x05, 0x84, 0x84, 0xac, 0x8d, 0x3d, 0x49, 0x56, 0xec, 0x8b, 0xbb, 0x33, 0x8b, 0xe8, + 0x01, 0x09, 0x89, 0x0f, 0x00, 0x47, 0x3e, 0x02, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8f, 0x15, + 0x87, 0x8a, 0x3a, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xee, 0x5a, 0x44, 0x40, + 0x6f, 0x3b, 0xcf, 0xf3, 0x9b, 0xdf, 0x3c, 0xaf, 0x33, 0xcf, 0x82, 0x31, 0x08, 0x42, 0x4e, 0xbf, + 0x6e, 0x8d, 0xc2, 0x80, 0x07, 0xa8, 0x2a, 0x57, 0xe6, 0xe5, 0x43, 0x87, 0x1f, 0x45, 0xfb, 0xad, + 0x41, 0xe0, 0x5d, 0x39, 0x0c, 0x0e, 0x83, 0x2b, 0x42, 0xbd, 0x1f, 0x1d, 0x88, 0x95, 0x58, 0x88, + 0x2f, 0xb9, 0x0d, 0xff, 0xaa, 0x83, 0xf1, 0x69, 0xe8, 0x70, 0x4a, 0xe8, 0xe3, 0x88, 0x32, 0x8e, + 0x76, 0x01, 0xb8, 0xe3, 0x51, 0x46, 0x43, 0x87, 0xb2, 0x55, 0xfd, 0x42, 0x69, 0xbd, 0xbe, 0x81, + 0x5a, 0xea, 0xa8, 0x9e, 0xe3, 0xd1, 0xae, 0xd0, 0x6c, 0x9a, 0x4f, 0x5f, 0xac, 0x69, 0xbf, 0xbf, + 0x58, 0x43, 0x7b, 0x21, 0xb5, 0x5d, 0x37, 0x18, 0xf4, 0xd2, 0x5d, 0x24, 0xc3, 0x80, 
0x3e, 0x84, + 0x6a, 0x37, 0x88, 0xc2, 0x01, 0x5d, 0x9d, 0xbb, 0xa0, 0xaf, 0x37, 0x36, 0xd6, 0x12, 0xae, 0xec, + 0xa9, 0x2d, 0x09, 0xd9, 0xf2, 0x23, 0x8f, 0x54, 0x99, 0xf8, 0xc6, 0x6b, 0x00, 0x13, 0x29, 0x9a, + 0x87, 0x52, 0x7b, 0xaf, 0xd3, 0xd4, 0xd0, 0x02, 0x94, 0xc9, 0xa3, 0xed, 0xad, 0xa6, 0x8e, 0x4f, + 0xc1, 0xa2, 0xe2, 0x60, 0xa3, 0xc0, 0x67, 0x14, 0xdf, 0x84, 0x3a, 0xa1, 0xf6, 0x30, 0xf1, 0xa4, + 0x05, 0xf3, 0x8f, 0xa3, 0xac, 0x1b, 0x2b, 0xc9, 0xd1, 0x0f, 0x23, 0x1a, 0x3e, 0x51, 0x30, 0x92, + 0x80, 0xf0, 0x2d, 0x30, 0xe4, 0x76, 0x49, 0x87, 0xae, 0xc0, 0x7c, 0x48, 0x59, 0xe4, 0xf2, 0x64, + 0xff, 0xe9, 0xa9, 0xfd, 0x12, 0x47, 0x12, 0x14, 0xfe, 0x51, 0x07, 0x23, 0x4b, 0x8d, 0xde, 0x03, + 0xc4, 0xb8, 0x1d, 0xf2, 0xbe, 0x88, 0x07, 0xb7, 0xbd, 0x51, 0xdf, 0x8b, 0xc9, 0xf4, 0xf5, 0x12, + 0x69, 0x0a, 0x4d, 0x2f, 0x51, 0xec, 0x30, 0xb4, 0x0e, 0x4d, 0xea, 0x0f, 0xf3, 0xd8, 0x39, 0x81, + 0x6d, 0x50, 0x7f, 0x98, 0x45, 0x5e, 0x85, 0x05, 0xcf, 0xe6, 0x83, 0x23, 0x1a, 0xb2, 0xd5, 0x52, + 0xde, 0xb5, 0x6d, 0x7b, 0x9f, 0xba, 0x3b, 0x52, 0x49, 0x52, 0x14, 0xee, 0xc0, 0x62, 0xce, 0x68, + 0x74, 0xe3, 0x84, 0x69, 0x2e, 0xc7, 0x69, 0xce, 0x26, 0x14, 0xf7, 0x60, 0x59, 0x50, 0x75, 0x79, + 0x48, 0x6d, 0x2f, 0x25, 0xbc, 0x59, 0x40, 0x78, 0xf6, 0x75, 0xc2, 0xdb, 0x47, 0x91, 0xff, 0x65, + 0x01, 0xeb, 0x35, 0x40, 0xc2, 0xf4, 0x4f, 0x6c, 0x37, 0xa2, 0x2c, 0x09, 0xe0, 0xff, 0x01, 0xdc, + 0x58, 0xda, 0xf7, 0x6d, 0x8f, 0x8a, 0xc0, 0xd5, 0x48, 0x4d, 0x48, 0x76, 0x6d, 0x8f, 0xe2, 0x1b, + 0xb0, 0x9c, 0xdb, 0xa4, 0x4c, 0x79, 0x0b, 0x0c, 0xb9, 0xeb, 0x2b, 0x21, 0x17, 0xc6, 0xd4, 0x48, + 0xdd, 0x9d, 0x40, 0xf1, 0x32, 0x2c, 0x6d, 0x27, 0x34, 0xc9, 0x69, 0xf8, 0xba, 0xb2, 0x41, 0x09, + 0x15, 0xdb, 0x1a, 0xd4, 0x27, 0x36, 0x24, 0x64, 0x90, 0x1a, 0xc1, 0x30, 0x82, 0xe6, 0x23, 0x46, + 0xc3, 0x2e, 0xb7, 0x79, 0x4a, 0xf5, 0x8b, 0x0e, 0x4b, 0x19, 0xa1, 0xa2, 0xba, 0x08, 0x0d, 0xc7, + 0x3f, 0xa4, 0x8c, 0x3b, 0x81, 0xdf, 0x0f, 0x6d, 0x2e, 0x5d, 0xd2, 0xc9, 0x62, 0x2a, 0x25, 0x36, + 0xa7, 0xb1, 0xd7, 0x7e, 
0xe4, 0xf5, 0x55, 0x28, 0xe3, 0x12, 0x28, 0x93, 0x9a, 0x1f, 0x79, 0x32, + 0x82, 0x71, 0x55, 0xd9, 0x23, 0xa7, 0x3f, 0xc5, 0x54, 0x12, 0x4c, 0x4d, 0x7b, 0xe4, 0x74, 0x72, + 0x64, 0x2d, 0x58, 0x0e, 0x23, 0x97, 0x4e, 0xc3, 0xcb, 0x02, 0xbe, 0x14, 0xab, 0x72, 0x78, 0xfc, + 0x05, 0x2c, 0xc7, 0x86, 0x77, 0xee, 0xe4, 0x4d, 0x3f, 0x0b, 0xf3, 0x11, 0xa3, 0x61, 0xdf, 0x19, + 0xaa, 0x34, 0x54, 0xe3, 0x65, 0x67, 0x88, 0x2e, 0x43, 0x79, 0x68, 0x73, 0x5b, 0x98, 0x59, 0xdf, + 0x38, 0x97, 0x64, 0xfc, 0x35, 0xe7, 0x89, 0x80, 0xe1, 0x7b, 0x80, 0x62, 0x15, 0xcb, 0xb3, 0xbf, + 0x0f, 0x15, 0x16, 0x0b, 0x54, 0xdd, 0x9c, 0xcf, 0xb2, 0x4c, 0x59, 0x42, 0x24, 0x12, 0xff, 0xac, + 0x83, 0xb5, 0x43, 0x79, 0xe8, 0x0c, 0xd8, 0xdd, 0x20, 0xcc, 0x96, 0x3d, 0x7b, 0xd3, 0xed, 0x77, + 0x03, 0x8c, 0xa4, 0xb1, 0xfa, 0x8c, 0x72, 0xd5, 0x82, 0xa7, 0x8b, 0x5a, 0x90, 0x91, 0x7a, 0x02, + 0xed, 0x52, 0x8e, 0x3b, 0xb0, 0x36, 0xd3, 0x66, 0x15, 0x8a, 0x4b, 0x50, 0xf5, 0x04, 0x44, 0xc5, + 0xa2, 0x91, 0xd0, 0xca, 0x8d, 0x44, 0x69, 0xf1, 0x6f, 0x3a, 0x9c, 0x9a, 0x6a, 0xab, 0xd8, 0x85, + 0x83, 0x30, 0xf0, 0x54, 0xae, 0xb3, 0xd9, 0x6a, 0xc4, 0xf2, 0x8e, 0x12, 0x77, 0x86, 0xd9, 0x74, + 0xce, 0xe5, 0xd2, 0x79, 0x0b, 0xaa, 0xa2, 0xb4, 0x93, 0x8b, 0x65, 0x29, 0xe7, 0xd5, 0x9e, 0xed, + 0x84, 0x9b, 0x2b, 0xea, 0xe6, 0x37, 0x84, 0xa8, 0x3d, 0xb4, 0x47, 0x9c, 0x86, 0x44, 0x6d, 0x43, + 0xef, 0x42, 0x75, 0x10, 0x1b, 0xc3, 0x56, 0xcb, 0x82, 0x60, 0x31, 0x21, 0xc8, 0x76, 0xbe, 0x82, + 0xe0, 0xef, 0x75, 0xa8, 0x48, 0xd3, 0xdf, 0x54, 0xae, 0x4c, 0x58, 0xa0, 0xfe, 0x20, 0x18, 0x3a, + 0xfe, 0xa1, 0x68, 0x91, 0x0a, 0x49, 0xd7, 0x08, 0xa9, 0xd2, 0x8d, 0x7b, 0xc1, 0x50, 0xf5, 0xb9, + 0x0a, 0x67, 0x7a, 0xa1, 0xed, 0xb3, 0x03, 0x1a, 0x0a, 0xc3, 0xd2, 0xc4, 0xe0, 0x6f, 0x00, 0x26, + 0xf1, 0xce, 0xc4, 0x49, 0xff, 0x67, 0x71, 0x6a, 0xc1, 0x3c, 0xb3, 0xbd, 0x91, 0x2b, 0x3a, 0x3c, + 0x97, 0xe8, 0xae, 0x10, 0xab, 0x48, 0x25, 0x20, 0x7c, 0x1d, 0x6a, 0x29, 0x75, 0x6c, 0x79, 0x7a, + 0x23, 0x1a, 0x44, 0x7c, 0xa3, 0x15, 0xa8, 0x88, 0xfb, 0x4e, 
0x04, 0xc2, 0x20, 0x72, 0x81, 0xdb, + 0x50, 0x95, 0x7c, 0x13, 0xbd, 0xbc, 0x73, 0xe4, 0x22, 0xbe, 0x2b, 0x0b, 0xa2, 0x58, 0xe7, 0x93, + 0x10, 0xe2, 0x36, 0x2c, 0xe6, 0x4a, 0x35, 0xf7, 0xfc, 0xe8, 0x27, 0x7c, 0x7e, 0xaa, 0xb2, 0x7c, + 0xff, 0x75, 0xdc, 0x70, 0x1f, 0x8c, 0xec, 0x21, 0xe8, 0x22, 0x94, 0xf9, 0x93, 0x91, 0xf4, 0xaa, + 0x31, 0xa1, 0x13, 0xea, 0xde, 0x93, 0x11, 0x25, 0x42, 0x9d, 0x46, 0x4c, 0x56, 0xfb, 0x54, 0xc4, + 0x4a, 0x42, 0xa8, 0x22, 0xf6, 0x9d, 0x0e, 0x8d, 0x49, 0xa2, 0xef, 0x3a, 0x2e, 0xfd, 0x2f, 0xfa, + 0xca, 0x84, 0x85, 0x03, 0xc7, 0xa5, 0xc2, 0x06, 0x79, 0x5c, 0xba, 0x2e, 0xac, 0xc3, 0x33, 0xb0, + 0x92, 0xd4, 0x61, 0xaf, 0x7b, 0x67, 0x33, 0xa9, 0xc2, 0x77, 0x3e, 0x86, 0x5a, 0xea, 0x1a, 0xaa, + 0x41, 0x65, 0xeb, 0xe1, 0xa3, 0xf6, 0x76, 0x53, 0x43, 0x8b, 0x50, 0xdb, 0x7d, 0xd0, 0xeb, 0xcb, + 0xa5, 0x8e, 0x4e, 0x41, 0x9d, 0x6c, 0xdd, 0xdb, 0xfa, 0xac, 0xbf, 0xd3, 0xee, 0xdd, 0xbe, 0xdf, + 0x9c, 0x43, 0x08, 0x1a, 0x52, 0xb0, 0xfb, 0x40, 0xc9, 0x4a, 0x1b, 0xc7, 0x15, 0x58, 0x48, 0x6c, + 0x47, 0xd7, 0xa1, 0xbc, 0x17, 0xb1, 0x23, 0xb4, 0x52, 0x34, 0x9f, 0x99, 0xa7, 0xa7, 0xa4, 0xaa, + 0x27, 0x34, 0xf4, 0x01, 0x54, 0xc4, 0x34, 0x80, 0x0a, 0x87, 0x2b, 0xb3, 0x78, 0x64, 0xc2, 0x1a, + 0xba, 0x03, 0xf5, 0xcc, 0x14, 0x31, 0x63, 0xf7, 0xf9, 0x9c, 0x34, 0x3f, 0x70, 0x60, 0xed, 0xaa, + 0x8e, 0xee, 0x43, 0x3d, 0x33, 0x00, 0x20, 0x33, 0x57, 0x4c, 0xb9, 0x51, 0x62, 0xc2, 0x55, 0x30, + 0x31, 0x60, 0x0d, 0x6d, 0x01, 0x4c, 0xde, 0x7e, 0x74, 0x2e, 0x07, 0xce, 0x0e, 0x09, 0xa6, 0x59, + 0xa4, 0x4a, 0x69, 0x36, 0xa1, 0x96, 0xbe, 0x7c, 0x68, 0xb5, 0xe0, 0x31, 0x94, 0x24, 0xb3, 0x9f, + 0x49, 0xac, 0xa1, 0xbb, 0x60, 0xb4, 0x5d, 0xf7, 0x24, 0x34, 0x66, 0x56, 0xc3, 0xa6, 0x79, 0x5c, + 0x38, 0x3b, 0xe3, 0xb1, 0x41, 0x97, 0xf2, 0x8f, 0xca, 0xac, 0x17, 0xd4, 0x7c, 0xfb, 0x6f, 0x71, + 0xe9, 0x69, 0x3b, 0xd0, 0xc8, 0x5f, 0x9c, 0x68, 0xd6, 0xf4, 0x67, 0x5a, 0xa9, 0xa2, 0xf8, 0xa6, + 0xd5, 0xd6, 0xe3, 0xcc, 0x1a, 0xd9, 0xfa, 0x47, 0x67, 0x5e, 0x27, 0x8b, 0x5b, 0xd3, 0xfc, 0xdf, + 
0x34, 0x57, 0xb6, 0x5b, 0x62, 0xa6, 0xcd, 0x8f, 0x9e, 0xbd, 0xb4, 0xb4, 0xe7, 0x2f, 0x2d, 0xed, + 0xd5, 0x4b, 0x4b, 0xff, 0x76, 0x6c, 0xe9, 0x3f, 0x8d, 0x2d, 0xfd, 0xe9, 0xd8, 0xd2, 0x9f, 0x8d, + 0x2d, 0xfd, 0x8f, 0xb1, 0xa5, 0xff, 0x39, 0xb6, 0xb4, 0x57, 0x63, 0x4b, 0xff, 0xe1, 0xd8, 0xd2, + 0x9e, 0x1d, 0x5b, 0xda, 0xf3, 0x63, 0x4b, 0xfb, 0xbc, 0x3a, 0x70, 0x1d, 0xea, 0xf3, 0xfd, 0xaa, + 0xf8, 0x4d, 0xba, 0xf6, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x33, 0x55, 0x6d, 0x0d, + 0x00, 0x00, +} + +func (x MatchType) String() string { + s, ok := MatchType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x WriteRequest_SourceEnum) String() string { + s, ok := WriteRequest_SourceEnum_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *WriteRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WriteRequest) + if !ok { + that2, ok := that.(WriteRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(that1.Timeseries[i]) { + return false + } + } + if this.Source != that1.Source { + return false + } + return true +} +func (this *WriteResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WriteResponse) + if !ok { + that2, ok := that.(WriteResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *ReadRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ReadRequest) + if !ok { + that2, ok := that.(ReadRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == 
nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Queries) != len(that1.Queries) { + return false + } + for i := range this.Queries { + if !this.Queries[i].Equal(that1.Queries[i]) { + return false + } + } + return true +} +func (this *ReadResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ReadResponse) + if !ok { + that2, ok := that.(ReadResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Results) != len(that1.Results) { + return false + } + for i := range this.Results { + if !this.Results[i].Equal(that1.Results[i]) { + return false + } + } + return true +} +func (this *QueryRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryRequest) + if !ok { + that2, ok := that.(QueryRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } + } + return true +} +func (this *QueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResponse) + if !ok { + that2, ok := that.(QueryResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { + return false + } + } + return true +} +func (this 
*QueryStreamResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryStreamResponse) + if !ok { + that2, ok := that.(QueryStreamResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { + return false + } + } + return true +} +func (this *LabelValuesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelValuesRequest) + if !ok { + that2, ok := that.(LabelValuesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.LabelName != that1.LabelName { + return false + } + return true +} +func (this *LabelValuesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelValuesResponse) + if !ok { + that2, ok := that.(LabelValuesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.LabelValues) != len(that1.LabelValues) { + return false + } + for i := range this.LabelValues { + if this.LabelValues[i] != that1.LabelValues[i] { + return false + } + } + return true +} +func (this *LabelNamesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelNamesRequest) + if !ok { + that2, ok := that.(LabelNamesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *LabelNamesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } 
+ + that1, ok := that.(*LabelNamesResponse) + if !ok { + that2, ok := that.(LabelNamesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.LabelNames) != len(that1.LabelNames) { + return false + } + for i := range this.LabelNames { + if this.LabelNames[i] != that1.LabelNames[i] { + return false + } + } + return true +} +func (this *UserStatsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserStatsRequest) + if !ok { + that2, ok := that.(UserStatsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *UserStatsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserStatsResponse) + if !ok { + that2, ok := that.(UserStatsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.IngestionRate != that1.IngestionRate { + return false + } + if this.NumSeries != that1.NumSeries { + return false + } + if this.ApiIngestionRate != that1.ApiIngestionRate { + return false + } + if this.RuleIngestionRate != that1.RuleIngestionRate { + return false + } + return true +} +func (this *UserIDStatsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserIDStatsResponse) + if !ok { + that2, ok := that.(UserIDStatsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.UserId != that1.UserId { + return false + } + if !this.Data.Equal(that1.Data) { + return false + } + return true +} +func (this *UsersStatsResponse) Equal(that interface{}) bool { + if that 
== nil { + return this == nil + } + + that1, ok := that.(*UsersStatsResponse) + if !ok { + that2, ok := that.(UsersStatsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Stats) != len(that1.Stats) { + return false + } + for i := range this.Stats { + if !this.Stats[i].Equal(that1.Stats[i]) { + return false + } + } + return true +} +func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricsForLabelMatchersRequest) + if !ok { + that2, ok := that.(MetricsForLabelMatchersRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if len(this.MatchersSet) != len(that1.MatchersSet) { + return false + } + for i := range this.MatchersSet { + if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { + return false + } + } + return true +} +func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricsForLabelMatchersResponse) + if !ok { + that2, ok := that.(MetricsForLabelMatchersResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Metric) != len(that1.Metric) { + return false + } + for i := range this.Metric { + if !this.Metric[i].Equal(that1.Metric[i]) { + return false + } + } + return true +} +func (this *TimeSeriesChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesChunk) + if !ok { + that2, ok := that.(TimeSeriesChunk) + if ok { + that1 = &that2 + } else { + 
return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FromIngesterId != that1.FromIngesterId { + return false + } + if this.UserId != that1.UserId { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Chunks) != len(that1.Chunks) { + return false + } + for i := range this.Chunks { + if !this.Chunks[i].Equal(&that1.Chunks[i]) { + return false + } + } + return true +} +func (this *Chunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Chunk) + if !ok { + that2, ok := that.(Chunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if this.Encoding != that1.Encoding { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *TransferChunksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TransferChunksResponse) + if !ok { + that2, ok := that.(TransferChunksResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *TimeSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeries) + if !ok { + that2, ok := that.(TimeSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if 
!this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { + return false + } + } + return true +} +func (this *LabelPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelPair) + if !ok { + that2, ok := that.(LabelPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Name, that1.Name) { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *Sample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sample) + if !ok { + that2, ok := that.(Sample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if this.TimestampMs != that1.TimestampMs { + return false + } + return true +} +func (this *LabelMatchers) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelMatchers) + if !ok { + that2, ok := that.(LabelMatchers) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } + } + return true +} +func (this *Metric) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Metric) + if !ok { + that2, ok := that.(Metric) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return 
false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + return true +} +func (this *LabelMatcher) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelMatcher) + if !ok { + that2, ok := that.(LabelMatcher) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *TimeSeriesFile) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesFile) + if !ok { + that2, ok := that.(TimeSeriesFile) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FromIngesterId != that1.FromIngesterId { + return false + } + if this.UserId != that1.UserId { + return false + } + if this.Filename != that1.Filename { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *TransferTSDBResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TransferTSDBResponse) + if !ok { + that2, ok := that.(TransferTSDBResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *WriteRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.WriteRequest{") + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") + s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + s = append(s, "}") + return 
strings.Join(s, "") +} +func (this *WriteResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.WriteResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ReadRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.ReadRequest{") + if this.Queries != nil { + s = append(s, "Queries: "+fmt.Sprintf("%#v", this.Queries)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ReadResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.ReadResponse{") + if this.Results != nil { + s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.QueryRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.QueryResponse{") + if this.Timeseries != nil { + vs := make([]*TimeSeries, len(this.Timeseries)) + for i := range vs { + vs[i] = &this.Timeseries[i] + } + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryStreamResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.QueryStreamResponse{") + if this.Timeseries != nil { + vs := make([]*TimeSeriesChunk, len(this.Timeseries)) + for i := range 
vs { + vs[i] = &this.Timeseries[i] + } + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelValuesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelValuesRequest{") + s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelValuesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelValuesResponse{") + s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelNamesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.LabelNamesRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelNamesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelNamesResponse{") + s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UserStatsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.UserStatsRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UserStatsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.UserStatsResponse{") + s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") + s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") + s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") + s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") + s = append(s, "}") + return 
strings.Join(s, "") +} +func (this *UserIDStatsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.UserIDStatsResponse{") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UsersStatsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.UsersStatsResponse{") + if this.Stats != nil { + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MetricsForLabelMatchersRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.MetricsForLabelMatchersRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.MatchersSet != nil { + s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MetricsForLabelMatchersResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.MetricsForLabelMatchersResponse{") + if this.Metric != nil { + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeriesChunk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.TimeSeriesChunk{") + s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Chunks != nil { + vs := make([]*Chunk, 
len(this.Chunks)) + for i := range vs { + vs[i] = &this.Chunks[i] + } + s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Chunk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.Chunk{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TransferChunksResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.TransferChunksResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeries) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.TimeSeries{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Samples != nil { + vs := make([]*Sample, len(this.Samples)) + for i := range vs { + vs[i] = &this.Samples[i] + } + s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelPair) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.LabelPair{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Sample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.Sample{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + s = append(s, "}") + return 
strings.Join(s, "") +} +func (this *LabelMatchers) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelMatchers{") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Metric) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.Metric{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelMatcher) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.LabelMatcher{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeriesFile) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.TimeSeriesFile{") + s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TransferTSDBResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.TransferTSDBResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringCortex(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if 
they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IngesterClient is the client API for Ingester service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IngesterClient interface { + Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) + QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) + LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) + LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) + UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) + AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) + MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) + // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
+ TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) + // TransferTSDB transfers all files of a tsdb to a joining ingester + TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) +} + +type ingesterClient struct { + cc *grpc.ClientConn +} + +func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { + return &ingesterClient{cc} +} + +func (c *ingesterClient) Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { + out := new(WriteResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { + out := new(QueryResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) 
+ if err != nil { + return nil, err + } + x := &ingesterQueryStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Ingester_QueryStreamClient interface { + Recv() (*QueryStreamResponse, error) + grpc.ClientStream +} + +type ingesterQueryStreamClient struct { + grpc.ClientStream +} + +func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { + m := new(QueryStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { + out := new(LabelValuesResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { + out := new(LabelNamesResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { + out := new(UserStatsResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { + out := new(UsersStatsResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { + out := new(MetricsForLabelMatchersResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/TransferChunks", opts...) + if err != nil { + return nil, err + } + x := &ingesterTransferChunksClient{stream} + return x, nil +} + +type Ingester_TransferChunksClient interface { + Send(*TimeSeriesChunk) error + CloseAndRecv() (*TransferChunksResponse, error) + grpc.ClientStream +} + +type ingesterTransferChunksClient struct { + grpc.ClientStream +} + +func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(TransferChunksResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingesterClient) TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/TransferTSDB", opts...) 
+ if err != nil { + return nil, err + } + x := &ingesterTransferTSDBClient{stream} + return x, nil +} + +type Ingester_TransferTSDBClient interface { + Send(*TimeSeriesFile) error + CloseAndRecv() (*TransferTSDBResponse, error) + grpc.ClientStream +} + +type ingesterTransferTSDBClient struct { + grpc.ClientStream +} + +func (x *ingesterTransferTSDBClient) Send(m *TimeSeriesFile) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingesterTransferTSDBClient) CloseAndRecv() (*TransferTSDBResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(TransferTSDBResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// IngesterServer is the server API for Ingester service. +type IngesterServer interface { + Push(context.Context, *WriteRequest) (*WriteResponse, error) + Query(context.Context, *QueryRequest) (*QueryResponse, error) + QueryStream(*QueryRequest, Ingester_QueryStreamServer) error + LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) + LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) + UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) + AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) + MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) + // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). + TransferChunks(Ingester_TransferChunksServer) error + // TransferTSDB transfers all files of a tsdb to a joining ingester + TransferTSDB(Ingester_TransferTSDBServer) error +} + +// UnimplementedIngesterServer can be embedded to have forward compatible implementations. 
+type UnimplementedIngesterServer struct { +} + +func (*UnimplementedIngesterServer) Push(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") +} +func (*UnimplementedIngesterServer) Query(ctx context.Context, req *QueryRequest) (*QueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { + return status.Errorf(codes.Unimplemented, "method QueryStream not implemented") +} +func (*UnimplementedIngesterServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") +} +func (*UnimplementedIngesterServer) LabelNames(ctx context.Context, req *LabelNamesRequest) (*LabelNamesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") +} +func (*UnimplementedIngesterServer) UserStats(ctx context.Context, req *UserStatsRequest) (*UserStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserStats not implemented") +} +func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserStatsRequest) (*UsersStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllUserStats not implemented") +} +func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") +} +func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { + return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") +} +func (*UnimplementedIngesterServer) TransferTSDB(srv 
Ingester_TransferTSDBServer) error { + return status.Errorf(codes.Unimplemented, "method TransferTSDB not implemented") +} + +func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { + s.RegisterService(&_Ingester_serviceDesc, srv) +} + +func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).Push(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/Push", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).Push(ctx, req.(*WriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).Query(ctx, req.(*QueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(QueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) +} + +type Ingester_QueryStreamServer interface { + Send(*QueryStreamResponse) error + grpc.ServerStream +} + +type ingesterQueryStreamServer struct { + grpc.ServerStream +} + +func (x *ingesterQueryStreamServer) Send(m *QueryStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + 
+func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelValuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).LabelValues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/LabelValues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).LabelNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/LabelNames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).LabelNames(ctx, req.(*LabelNamesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UserStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).UserStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/UserStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UserStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).AllUserStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/AllUserStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsForLabelMatchersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) +} + +type Ingester_TransferChunksServer interface { + SendAndClose(*TransferChunksResponse) error + Recv() (*TimeSeriesChunk, error) + grpc.ServerStream +} + +type ingesterTransferChunksServer struct { + grpc.ServerStream +} + +func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { + m := new(TimeSeriesChunk) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil 
+} + +func _Ingester_TransferTSDB_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngesterServer).TransferTSDB(&ingesterTransferTSDBServer{stream}) +} + +type Ingester_TransferTSDBServer interface { + SendAndClose(*TransferTSDBResponse) error + Recv() (*TimeSeriesFile, error) + grpc.ServerStream +} + +type ingesterTransferTSDBServer struct { + grpc.ServerStream +} + +func (x *ingesterTransferTSDBServer) SendAndClose(m *TransferTSDBResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingesterTransferTSDBServer) Recv() (*TimeSeriesFile, error) { + m := new(TimeSeriesFile) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Ingester_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cortex.Ingester", + HandlerType: (*IngesterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Push", + Handler: _Ingester_Push_Handler, + }, + { + MethodName: "Query", + Handler: _Ingester_Query_Handler, + }, + { + MethodName: "LabelValues", + Handler: _Ingester_LabelValues_Handler, + }, + { + MethodName: "LabelNames", + Handler: _Ingester_LabelNames_Handler, + }, + { + MethodName: "UserStats", + Handler: _Ingester_UserStats_Handler, + }, + { + MethodName: "AllUserStats", + Handler: _Ingester_AllUserStats_Handler, + }, + { + MethodName: "MetricsForLabelMatchers", + Handler: _Ingester_MetricsForLabelMatchers_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "QueryStream", + Handler: _Ingester_QueryStream_Handler, + ServerStreams: true, + }, + { + StreamName: "TransferChunks", + Handler: _Ingester_TransferChunks_Handler, + ClientStreams: true, + }, + { + StreamName: "TransferTSDB", + Handler: _Ingester_TransferTSDB_Handler, + ClientStreams: true, + }, + }, + Metadata: "cortex.proto", +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Source != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x10 + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Timeseries[iNdEx].Size() + i -= size + if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WriteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Queries) > 0 { + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return 
len(dAtA) - i, nil +} + +func (m *ReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.EndTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelName) > 0 { + i -= 
len(m.LabelName) + copy(dAtA[i:], m.LabelName) + i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelValues) > 0 { + for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelValues[iNdEx]) + copy(dAtA[i:], m.LabelValues[iNdEx]) + i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelNames) > 0 { + 
for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelNames[iNdEx]) + copy(dAtA[i:], m.LabelNames[iNdEx]) + i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RuleIngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) + i-- + dAtA[i] = 0x21 + } + if m.ApiIngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) + i-- + dAtA[i] = 0x19 + } + if m.NumSeries != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.NumSeries)) + i-- + dAtA[i] = 0x10 + } + if m.IngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserIDStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UsersStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchersSet) > 0 { + for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchersSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.EndTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeriesChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0x12 + } + if len(m.FromIngesterId) > 0 { + i -= len(m.FromIngesterId) + copy(dAtA[i:], m.FromIngesterId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if m.Encoding != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Encoding)) + i-- + dAtA[i] = 0x18 + } + if m.EndTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TransferChunksResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransferChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeriesFile) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if len(m.Filename) > 0 { + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Filename))) + i-- + dAtA[i] = 0x1a + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0x12 + } + if len(m.FromIngesterId) > 0 { + i -= len(m.FromIngesterId) + copy(dAtA[i:], m.FromIngesterId) + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferTSDBResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TransferTSDBResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { + offset -= sovCortex(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if m.Source != 0 { + n += 1 + sovCortex(uint64(m.Source)) + } + return n +} + +func (m *WriteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ReadRequest) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *ReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *QueryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *QueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *QueryStreamResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelValuesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LabelName) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *LabelValuesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelValues) > 0 { + for _, s := range m.LabelValues { + l = len(s) + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelNamesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *LabelNamesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + l = len(s) + n += 1 + l + 
sovCortex(uint64(l)) + } + } + return n +} + +func (m *UserStatsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *UserStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IngestionRate != 0 { + n += 9 + } + if m.NumSeries != 0 { + n += 1 + sovCortex(uint64(m.NumSeries)) + } + if m.ApiIngestionRate != 0 { + n += 9 + } + if m.RuleIngestionRate != 0 { + n += 9 + } + return n +} + +func (m *UserIDStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *UsersStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *MetricsForLabelMatchersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } + if len(m.MatchersSet) > 0 { + for _, e := range m.MatchersSet { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *MetricsForLabelMatchersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *TimeSeriesChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromIngesterId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if 
len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } + if m.Encoding != 0 { + n += 1 + sovCortex(uint64(m.Encoding)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *TransferChunksResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.TimestampMs != 0 { + n += 1 + sovCortex(uint64(m.TimestampMs)) + } + return n +} + +func (m *LabelMatchers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { 
+ n += 1 + sovCortex(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *TimeSeriesFile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromIngesterId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Filename) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *TransferTSDBResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovCortex(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCortex(x uint64) (n int) { + return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *WriteRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteRequest{`, + `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *WriteResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteResponse{`, + `}`, + }, "") + return s +} +func (this *ReadRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForQueries := "[]*QueryRequest{" + for _, f := range this.Queries { + repeatedStringForQueries += strings.Replace(f.String(), "QueryRequest", "QueryRequest", 1) + "," + } + repeatedStringForQueries += "}" + s := strings.Join([]string{`&ReadRequest{`, + `Queries:` + repeatedStringForQueries + `,`, + `}`, + }, "") + return s +} +func (this *ReadResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]*QueryResponse{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(f.String(), 
"QueryResponse", "QueryResponse", 1) + "," + } + repeatedStringForResults += "}" + s := strings.Join([]string{`&ReadResponse{`, + `Results:` + repeatedStringForResults + `,`, + `}`, + }, "") + return s +} +func (this *QueryRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&QueryRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `}`, + }, "") + return s +} +func (this *QueryResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTimeseries := "[]TimeSeries{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + "," + } + repeatedStringForTimeseries += "}" + s := strings.Join([]string{`&QueryResponse{`, + `Timeseries:` + repeatedStringForTimeseries + `,`, + `}`, + }, "") + return s +} +func (this *QueryStreamResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTimeseries := "[]TimeSeriesChunk{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + "," + } + repeatedStringForTimeseries += "}" + s := strings.Join([]string{`&QueryStreamResponse{`, + `Timeseries:` + repeatedStringForTimeseries + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValuesRequest{`, + `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, + `}`, + }, "") + return s +} +func (this 
*LabelValuesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValuesResponse{`, + `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, + `}`, + }, "") + return s +} +func (this *LabelNamesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelNamesRequest{`, + `}`, + }, "") + return s +} +func (this *LabelNamesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelNamesResponse{`, + `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, + `}`, + }, "") + return s +} +func (this *UserStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserStatsRequest{`, + `}`, + }, "") + return s +} +func (this *UserStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserStatsResponse{`, + `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, + `NumSeries:` + fmt.Sprintf("%v", this.NumSeries) + `,`, + `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, + `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, + `}`, + }, "") + return s +} +func (this *UserIDStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserIDStatsResponse{`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UsersStatsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStats := "[]*UserIDStatsResponse{" + for _, f := range this.Stats { + repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," + } + repeatedStringForStats += "}" + s := strings.Join([]string{`&UsersStatsResponse{`, + `Stats:` + repeatedStringForStats + `,`, + `}`, + }, "") + 
return s +} +func (this *MetricsForLabelMatchersRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchersSet := "[]*LabelMatchers{" + for _, f := range this.MatchersSet { + repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + } + repeatedStringForMatchersSet += "}" + s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `MatchersSet:` + repeatedStringForMatchersSet + `,`, + `}`, + }, "") + return s +} +func (this *MetricsForLabelMatchersResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetric := "[]*Metric{" + for _, f := range this.Metric { + repeatedStringForMetric += strings.Replace(f.String(), "Metric", "Metric", 1) + "," + } + repeatedStringForMetric += "}" + s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, + `Metric:` + repeatedStringForMetric + `,`, + `}`, + }, "") + return s +} +func (this *TimeSeriesChunk) String() string { + if this == nil { + return "nil" + } + repeatedStringForChunks := "[]Chunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," + } + repeatedStringForChunks += "}" + s := strings.Join([]string{`&TimeSeriesChunk{`, + `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Chunks:` + repeatedStringForChunks + `,`, + `}`, + }, "") + return s +} +func (this *Chunk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Chunk{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Encoding:` + fmt.Sprintf("%v", this.Encoding) + 
`,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *TransferChunksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransferChunksResponse{`, + `}`, + }, "") + return s +} +func (this *TimeSeries) String() string { + if this == nil { + return "nil" + } + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," + } + repeatedStringForSamples += "}" + s := strings.Join([]string{`&TimeSeries{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Samples:` + repeatedStringForSamples + `,`, + `}`, + }, "") + return s +} +func (this *LabelPair) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelPair{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Sample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sample{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `}`, + }, "") + return s +} +func (this *LabelMatchers) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&LabelMatchers{`, + `Matchers:` + repeatedStringForMatchers + `,`, + `}`, + }, "") + return s +} +func (this *Metric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metric{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} +func (this *LabelMatcher) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&LabelMatcher{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TimeSeriesFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeSeriesFile{`, + `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *TransferTSDBResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransferTSDBResponse{`, + `}`, + }, "") + return s +} +func valueToStringCortex(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, PreallocTimeseries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + m.Source = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &QueryRequest{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResponse{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeriesChunk{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy 
+ } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.IngestionRate = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) + } + m.NumSeries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumSeries |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ApiIngestionRate = float64(math.Float64frombits(v)) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.RuleIngestionRate = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*UserIDStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &UserStatsResponse{} + } + if err := 
m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &UserIDStatsResponse{}) + if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) + if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, &Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) + } + m.Encoding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Encoding |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatchers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, LabelAdapter{}) + if err := 
m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MatchType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferTSDBResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferTSDBResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferTSDBResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 
0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCortex(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCortex + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthCortex + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCortex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCortex(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthCortex + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCortex = fmt.Errorf("proto: integer overflow") +) diff --git a/integration-tests/proto/1.3.0/timeseries.go b/integration-tests/proto/1.3.0/timeseries.go new file mode 100644 index 0000000000..204382952b --- /dev/null +++ b/integration-tests/proto/1.3.0/timeseries.go @@ -0,0 +1,256 @@ +package client + +import ( + "flag" + "fmt" + "io" + "strings" + "sync" + "unsafe" + + "github.com/prometheus/prometheus/pkg/labels" +) + +var ( + expectedTimeseries = 100 + expectedLabels = 20 + expectedSamplesPerSeries = 10 + + slicePool = sync.Pool{ + New: func() interface{} { + return make([]PreallocTimeseries, 0, expectedTimeseries) + }, + } + + timeSeriesPool = sync.Pool{ + New: func() interface{} { + return &TimeSeries{ + Labels: make([]LabelAdapter, 0, expectedLabels), + Samples: make([]Sample, 0, expectedSamplesPerSeries), + } + }, + } +) + +// PreallocConfig configures how structures will be preallocated to optimise +// proto unmarshalling. +type PreallocConfig struct{} + +// RegisterFlags registers configuration settings. +func (PreallocConfig) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, use for preallocations.") + f.IntVar(&expectedLabels, "ingester-client.expected-labels", expectedLabels, "Expected number of labels per timeseries, used for preallocations.") + f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.") +} + +// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal. +type PreallocWriteRequest struct { + WriteRequest +} + +// Unmarshal implements proto.Message. 
+func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error { + p.Timeseries = slicePool.Get().([]PreallocTimeseries) + return p.WriteRequest.Unmarshal(dAtA) +} + +// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal. +type PreallocTimeseries struct { + *TimeSeries +} + +// Unmarshal implements proto.Message. +func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { + p.TimeSeries = timeSeriesPool.Get().(*TimeSeries) + return p.TimeSeries.Unmarshal(dAtA) +} + +// LabelAdapter is a labels.Label that can be marshalled to/from protos. +type LabelAdapter labels.Label + +// Marshal implements proto.Marshaller. +func (bs *LabelAdapter) Marshal() ([]byte, error) { + buf := make([]byte, bs.Size()) + _, err := bs.MarshalTo(buf) + return buf, err +} + +// MarshalTo implements proto.Marshaller. +func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) { + var i int + ls := (*labels.Label)(bs) + + buf[i] = 0xa + i++ + i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) + i += copy(buf[i:], ls.Name) + + buf[i] = 0x12 + i++ + i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) + i += copy(buf[i:], ls.Value) + + return i, nil +} + +// Unmarshal a LabelAdapater, implements proto.Unmarshaller. +// NB this is a copy of the autogenerated code to unmarshal a LabelPair, +// with the byte copying replaced with a yoloString. 
+func (bs *LabelAdapter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + bs.Name = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + bs.Value = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func yoloString(buf []byte) string { + return *((*string)(unsafe.Pointer(&buf))) +} + +// Size implements proto.Sizer. +func (bs *LabelAdapter) Size() int { + ls := (*labels.Label)(bs) + var n int + l := len(ls.Name) + n += 1 + l + sovCortex(uint64(l)) + l = len(ls.Value) + n += 1 + l + sovCortex(uint64(l)) + return n +} + +// Equal implements proto.Equaler. +func (bs *LabelAdapter) Equal(other LabelAdapter) bool { + return bs.Name == other.Name && bs.Value == other.Value +} + +// Compare implements proto.Comparer. +func (bs *LabelAdapter) Compare(other LabelAdapter) int { + if c := strings.Compare(bs.Name, other.Name); c != 0 { + return c + } + return strings.Compare(bs.Value, other.Value) +} + +// ReuseSlice puts the slice back into a sync.Pool for reuse. +func ReuseSlice(slice []PreallocTimeseries) { + for i := range slice { + ReuseTimeseries(slice[i].TimeSeries) + } + slicePool.Put(slice[:0]) +} + +// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. 
+func ReuseTimeseries(ts *TimeSeries) { + ts.Labels = ts.Labels[:0] + ts.Samples = ts.Samples[:0] + timeSeriesPool.Put(ts) +} diff --git a/integration-tests/proto/proto_test.go b/integration-tests/proto/proto_test.go new file mode 100644 index 0000000000..ab9de1ccbe --- /dev/null +++ b/integration-tests/proto/proto_test.go @@ -0,0 +1,246 @@ +package main + +import ( + "context" + "log" + "net" + "testing" + + proto12 "github.com/cortexproject/cortex/integration-tests/proto/1.2.1" + proto13 "github.com/cortexproject/cortex/integration-tests/proto/1.3.0" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +const bufSize = 1024 * 1024 + +var ( + lis *bufconn.Listener + server *grpc.Server + labelNames = []string{"foo", "fuzz", "bar", "buzz"} + labelValues = []string{"lfoo", "lfuzz", "lbar", "lbuzz"} +) + +func init() { + lis = bufconn.Listen(bufSize) + server = grpc.NewServer() +} + +func bufDialer(context.Context, string) (net.Conn, error) { + return lis.Dial() +} + +func Test_ProtoCompat(t *testing.T) { + ctx := context.Background() + conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Failed to dial bufnet: %v", err) + } + defer conn.Close() + + // 1.3.0 server + proto13.RegisterIngesterServer(server, &fakeIngesterServer{t}) + // 1.2.1 client + client := proto12.NewIngesterClient(conn) + + go func() { + if err := server.Serve(lis); err != nil { + log.Fatalf("Server exited with error: %v", err) + } + }() + + l, err := client.LabelNames(ctx, &proto12.LabelNamesRequest{}) + if err != nil { + t.Fatal(err) + } + require.Equal(t, labelNames, l.LabelNames) + + v, err := client.LabelValues(ctx, &proto12.LabelValuesRequest{LabelName: "foo"}) + if err != nil { + t.Fatal(err) + } + require.Equal(t, labelValues, v.LabelValues) + + s, err := client.UserStats(ctx, &proto12.UserStatsRequest{}) + if err != nil { + t.Fatal(err) + } + 
require.Equal(t, &proto12.UserStatsResponse{ + IngestionRate: 10.5, + NumSeries: 1000000, + ApiIngestionRate: 20.245, + RuleIngestionRate: 39.033, + }, s) + + r, err := client.Push(ctx, &proto12.WriteRequest{ + Source: proto12.RULE, + Timeseries: []proto12.PreallocTimeseries{ + proto12.PreallocTimeseries{ + TimeSeries: &proto12.TimeSeries{ + Labels: []proto12.LabelAdapter{ + proto12.LabelAdapter{Name: "foo", Value: "bar"}, + proto12.LabelAdapter{Name: "fuzz", Value: "buzz"}, + }, + Samples: []proto12.Sample{ + proto12.Sample{Value: 10, TimestampMs: 1}, + proto12.Sample{Value: 10000, TimestampMs: 2}, + }, + }, + }, + proto12.PreallocTimeseries{ + TimeSeries: &proto12.TimeSeries{ + Labels: []proto12.LabelAdapter{ + proto12.LabelAdapter{Name: "foo", Value: "buzz"}, + proto12.LabelAdapter{Name: "fuzz", Value: "bar"}, + }, + Samples: []proto12.Sample{ + proto12.Sample{Value: 20.1234, TimestampMs: 1}, + proto12.Sample{Value: 25.1233, TimestampMs: 2}, + }, + }, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, r) + q, err := client.Query(ctx, &proto12.QueryRequest{ + StartTimestampMs: 1111, + EndTimestampMs: 20000, + Matchers: []*proto12.LabelMatcher{ + &proto12.LabelMatcher{Type: proto12.REGEX_MATCH, Name: "foo", Value: "bar"}, + &proto12.LabelMatcher{Type: proto12.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, + }, + }) + if err != nil { + t.Fatal(err) + } + require.Equal(t, &proto12.QueryResponse{ + Timeseries: []proto12.TimeSeries{ + proto12.TimeSeries{ + Labels: []proto12.LabelAdapter{ + proto12.LabelAdapter{Name: "foo", Value: "bar"}, + proto12.LabelAdapter{Name: "fuzz", Value: "buzz"}, + }, + Samples: []proto12.Sample{ + proto12.Sample{Value: 10, TimestampMs: 1}, + proto12.Sample{Value: 10000, TimestampMs: 2}, + }, + }, + }, + }, q) +} + +func Test_LabelAdapter(t *testing.T) { + l3 := proto13.LabelAdapter{Name: "foo", Value: "bar"} + b3, err := l3.Marshal() + if err != nil { + t.Fatal(err) + } + t.Log(b3) + l2 := proto13.LabelAdapter{} + err = 
l2.Unmarshal(b3) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, l3, l2) +} + +type fakeIngesterServer struct { + t *testing.T +} + +func (f *fakeIngesterServer) LabelValues(_ context.Context, v *proto13.LabelValuesRequest) (*proto13.LabelValuesResponse, error) { + require.Equal(f.t, "foo", v.LabelName) + return &proto13.LabelValuesResponse{ + LabelValues: labelValues, + }, nil +} +func (fakeIngesterServer) LabelNames(context.Context, *proto13.LabelNamesRequest) (*proto13.LabelNamesResponse, error) { + return &proto13.LabelNamesResponse{ + LabelNames: labelNames, + }, nil +} +func (fakeIngesterServer) UserStats(context.Context, *proto13.UserStatsRequest) (*proto13.UserStatsResponse, error) { + return &proto13.UserStatsResponse{ + IngestionRate: 10.5, + NumSeries: 1000000, + ApiIngestionRate: 20.245, + RuleIngestionRate: 39.033, + }, nil +} +func (f *fakeIngesterServer) Push(_ context.Context, req *proto13.WriteRequest) (*proto13.WriteResponse, error) { + require.Equal(f.t, &proto13.WriteRequest{ + Source: proto13.RULE, + Timeseries: []proto13.PreallocTimeseries{ + proto13.PreallocTimeseries{ + TimeSeries: &proto13.TimeSeries{ + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "foo", Value: "bar"}, + proto13.LabelAdapter{Name: "fuzz", Value: "buzz"}, + }, + Samples: []proto13.Sample{ + proto13.Sample{Value: 10, TimestampMs: 1}, + proto13.Sample{Value: 10000, TimestampMs: 2}, + }, + }, + }, + proto13.PreallocTimeseries{ + TimeSeries: &proto13.TimeSeries{ + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "foo", Value: "buzz"}, + proto13.LabelAdapter{Name: "fuzz", Value: "bar"}, + }, + Samples: []proto13.Sample{ + proto13.Sample{Value: 20.1234, TimestampMs: 1}, + proto13.Sample{Value: 25.1233, TimestampMs: 2}, + }, + }, + }, + }, + }, req) + return &proto13.WriteResponse{}, nil +} +func (f *fakeIngesterServer) Query(_ context.Context, req *proto13.QueryRequest) (*proto13.QueryResponse, error) { + require.Equal(f.t, 
&proto13.QueryRequest{ + StartTimestampMs: 1111, + EndTimestampMs: 20000, + Matchers: []*proto13.LabelMatcher{ + &proto13.LabelMatcher{Type: proto13.REGEX_MATCH, Name: "foo", Value: "bar"}, + &proto13.LabelMatcher{Type: proto13.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, + }, + }, req) + return &proto13.QueryResponse{ + Timeseries: []proto13.TimeSeries{ + proto13.TimeSeries{ + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "foo", Value: "bar"}, + proto13.LabelAdapter{Name: "fuzz", Value: "buzz"}, + }, + Samples: []proto13.Sample{ + proto13.Sample{Value: 10, TimestampMs: 1}, + proto13.Sample{Value: 10000, TimestampMs: 2}, + }, + }, + }, + }, nil +} +func (fakeIngesterServer) QueryStream(*proto13.QueryRequest, proto13.Ingester_QueryStreamServer) error { + return nil +} +func (fakeIngesterServer) AllUserStats(context.Context, *proto13.UserStatsRequest) (*proto13.UsersStatsResponse, error) { + return nil, nil +} +func (fakeIngesterServer) MetricsForLabelMatchers(context.Context, *proto13.MetricsForLabelMatchersRequest) (*proto13.MetricsForLabelMatchersResponse, error) { + return nil, nil +} +func (fakeIngesterServer) TransferChunks(proto13.Ingester_TransferChunksServer) error { + return nil +} + +// TransferTSDB transfers all files of a tsdb to a joining ingester +func (fakeIngesterServer) TransferTSDB(proto13.Ingester_TransferTSDBServer) error { + return nil +} From 3a9d0706f894b03c77136f94aaa26fee002ed7c9 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 31 Jan 2020 15:42:10 -0500 Subject: [PATCH 04/20] Fixes compatibility issue with labelpair custom type Signed-off-by: Cyril Tovena --- integration-tests/proto/1.3.0/timeseries.go | 61 +++++++++++++-------- integration-tests/proto/proto_test.go | 54 +++++++++++++++--- pkg/ingester/client/timeseries.go | 61 +++++++++++++-------- 3 files changed, 125 insertions(+), 51 deletions(-) diff --git a/integration-tests/proto/1.3.0/timeseries.go b/integration-tests/proto/1.3.0/timeseries.go index 
204382952b..76f56ceaf5 100644 --- a/integration-tests/proto/1.3.0/timeseries.go +++ b/integration-tests/proto/1.3.0/timeseries.go @@ -70,27 +70,36 @@ type LabelAdapter labels.Label // Marshal implements proto.Marshaller. func (bs *LabelAdapter) Marshal() ([]byte, error) { - buf := make([]byte, bs.Size()) - _, err := bs.MarshalTo(buf) - return buf, err + size := bs.Size() + buf := make([]byte, size) + n, err := bs.MarshalToSizedBuffer(buf[:size]) + return buf[:n], err +} + +func (m *LabelAdapter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } // MarshalTo implements proto.Marshaller. -func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) { - var i int +func (bs *LabelAdapter) MarshalToSizedBuffer(buf []byte) (n int, err error) { ls := (*labels.Label)(bs) - - buf[i] = 0xa - i++ - i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) - i += copy(buf[i:], ls.Name) - - buf[i] = 0x12 - i++ - i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) - i += copy(buf[i:], ls.Value) - - return i, nil + i := len(buf) + if len(ls.Value) > 0 { + i -= len(ls.Value) + copy(buf[i:], ls.Value) + i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) + i-- + buf[i] = 0x12 + } + if len(ls.Name) > 0 { + i -= len(ls.Name) + copy(buf[i:], ls.Name) + i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) + i-- + buf[i] = 0xa + } + return len(buf) - i, nil } // Unmarshal a LabelAdapater, implements proto.Unmarshaller. @@ -217,13 +226,21 @@ func yoloString(buf []byte) string { } // Size implements proto.Sizer. 
-func (bs *LabelAdapter) Size() int { +func (bs *LabelAdapter) Size() (n int) { ls := (*labels.Label)(bs) - var n int - l := len(ls.Name) - n += 1 + l + sovCortex(uint64(l)) + if bs == nil { + return 0 + } + var l int + _ = l + l = len(ls.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } l = len(ls.Value) - n += 1 + l + sovCortex(uint64(l)) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } return n } diff --git a/integration-tests/proto/proto_test.go b/integration-tests/proto/proto_test.go index ab9de1ccbe..916816c9bc 100644 --- a/integration-tests/proto/proto_test.go +++ b/integration-tests/proto/proto_test.go @@ -133,19 +133,59 @@ func Test_ProtoCompat(t *testing.T) { }, q) } -func Test_LabelAdapter(t *testing.T) { - l3 := proto13.LabelAdapter{Name: "foo", Value: "bar"} - b3, err := l3.Marshal() +func Test_CustomMarshal(t *testing.T) { + l13 := proto13.LabelAdapter{Name: "foo", Value: "bar"} + b3, err := l13.Marshal() if err != nil { t.Fatal(err) } - t.Log(b3) - l2 := proto13.LabelAdapter{} - err = l2.Unmarshal(b3) + l12 := proto12.LabelAdapter{} + err = l12.Unmarshal(b3) if err != nil { t.Fatal(err) } - require.EqualValues(t, l3, l2) + require.EqualValues(t, l13, l12) + + b2, err := l12.Marshal() + if err != nil { + t.Fatal(err) + } + err = l13.Unmarshal(b2) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, l13, l12) + + t13 := proto13.TimeSeries{ + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "1", Value: "2"}, + }, + Samples: []proto13.Sample{ + proto13.Sample{Value: 10, TimestampMs: 1}, + proto13.Sample{Value: 10000, TimestampMs: 2}, + }, + } + bt13, err := t13.Marshal() + if err != nil { + t.Fatal(err) + } + t12 := proto12.TimeSeries{} + err = t12.Unmarshal(bt13) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, t13, t12) + + bt12, err := t12.Marshal() + if err != nil { + t.Fatal(err) + } + t13 = proto13.TimeSeries{} + err = t13.Unmarshal(bt12) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, 
t13, t12) } type fakeIngesterServer struct { diff --git a/pkg/ingester/client/timeseries.go b/pkg/ingester/client/timeseries.go index 99cede9c91..445689c24a 100644 --- a/pkg/ingester/client/timeseries.go +++ b/pkg/ingester/client/timeseries.go @@ -70,27 +70,36 @@ type LabelAdapter labels.Label // Marshal implements proto.Marshaller. func (bs *LabelAdapter) Marshal() ([]byte, error) { - buf := make([]byte, bs.Size()) - _, err := bs.MarshalTo(buf) - return buf, err + size := bs.Size() + buf := make([]byte, size) + n, err := bs.MarshalToSizedBuffer(buf[:size]) + return buf[:n], err +} + +func (m *LabelAdapter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } // MarshalTo implements proto.Marshaller. -func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) { - var i int +func (bs *LabelAdapter) MarshalToSizedBuffer(buf []byte) (n int, err error) { ls := (*labels.Label)(bs) - - buf[i] = 0xa - i++ - i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) - i += copy(buf[i:], ls.Name) - - buf[i] = 0x12 - i++ - i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) - i += copy(buf[i:], ls.Value) - - return i, nil + i := len(buf) + if len(ls.Value) > 0 { + i -= len(ls.Value) + copy(buf[i:], ls.Value) + i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) + i-- + buf[i] = 0x12 + } + if len(ls.Name) > 0 { + i -= len(ls.Name) + copy(buf[i:], ls.Name) + i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) + i-- + buf[i] = 0xa + } + return len(buf) - i, nil } // Unmarshal a LabelAdapter, implements proto.Unmarshaller. @@ -217,13 +226,21 @@ func yoloString(buf []byte) string { } // Size implements proto.Sizer. 
-func (bs *LabelAdapter) Size() int { +func (bs *LabelAdapter) Size() (n int) { ls := (*labels.Label)(bs) - var n int - l := len(ls.Name) - n += 1 + l + sovCortex(uint64(l)) + if bs == nil { + return 0 + } + var l int + _ = l + l = len(ls.Name) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } l = len(ls.Value) - n += 1 + l + sovCortex(uint64(l)) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } return n } From 129cfbb885e36cfc7837cc383e64ab1d4c770999 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 31 Jan 2020 16:27:01 -0500 Subject: [PATCH 05/20] Full ingester tests. Signed-off-by: Cyril Tovena --- integration-tests/proto/marshal_test.go | 64 +++++++ integration-tests/proto/proto_test.go | 229 ++++++++++++++++++++---- 2 files changed, 254 insertions(+), 39 deletions(-) create mode 100644 integration-tests/proto/marshal_test.go diff --git a/integration-tests/proto/marshal_test.go b/integration-tests/proto/marshal_test.go new file mode 100644 index 0000000000..fc5e3ec59d --- /dev/null +++ b/integration-tests/proto/marshal_test.go @@ -0,0 +1,64 @@ +package main + +import ( + "testing" + + proto12 "github.com/cortexproject/cortex/integration-tests/proto/1.2.1" + proto13 "github.com/cortexproject/cortex/integration-tests/proto/1.3.0" + "github.com/stretchr/testify/require" +) + +func Test_CustomMarshal(t *testing.T) { + l13 := proto13.LabelAdapter{Name: "foo", Value: "bar"} + b3, err := l13.Marshal() + if err != nil { + t.Fatal(err) + } + l12 := proto12.LabelAdapter{} + err = l12.Unmarshal(b3) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, l13, l12) + + b2, err := l12.Marshal() + if err != nil { + t.Fatal(err) + } + err = l13.Unmarshal(b2) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, l13, l12) + + t13 := proto13.TimeSeries{ + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "1", Value: "2"}, + }, + Samples: []proto13.Sample{ + proto13.Sample{Value: 10, TimestampMs: 1}, + proto13.Sample{Value: 10000, TimestampMs: 
2}, + }, + } + bt13, err := t13.Marshal() + if err != nil { + t.Fatal(err) + } + t12 := proto12.TimeSeries{} + err = t12.Unmarshal(bt13) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, t13, t12) + + bt12, err := t12.Marshal() + if err != nil { + t.Fatal(err) + } + t13 = proto13.TimeSeries{} + err = t13.Unmarshal(bt12) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, t13, t12) +} diff --git a/integration-tests/proto/proto_test.go b/integration-tests/proto/proto_test.go index 916816c9bc..2b7aa4affe 100644 --- a/integration-tests/proto/proto_test.go +++ b/integration-tests/proto/proto_test.go @@ -4,6 +4,7 @@ import ( "context" "log" "net" + "sync" "testing" proto12 "github.com/cortexproject/cortex/integration-tests/proto/1.2.1" @@ -20,6 +21,7 @@ var ( server *grpc.Server labelNames = []string{"foo", "fuzz", "bar", "buzz"} labelValues = []string{"lfoo", "lfuzz", "lbar", "lbuzz"} + wg = sync.WaitGroup{} ) func init() { @@ -131,61 +133,123 @@ func Test_ProtoCompat(t *testing.T) { }, }, }, q) -} -func Test_CustomMarshal(t *testing.T) { - l13 := proto13.LabelAdapter{Name: "foo", Value: "bar"} - b3, err := l13.Marshal() - if err != nil { - t.Fatal(err) - } - l12 := proto12.LabelAdapter{} - err = l12.Unmarshal(b3) + stream, err := client.QueryStream(ctx, &proto12.QueryRequest{ + StartTimestampMs: 1111, + EndTimestampMs: 20000, + Matchers: []*proto12.LabelMatcher{ + &proto12.LabelMatcher{Type: proto12.REGEX_MATCH, Name: "foo", Value: "bar"}, + &proto12.LabelMatcher{Type: proto12.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, + }, + }) if err != nil { t.Fatal(err) } - require.EqualValues(t, l13, l12) - - b2, err := l12.Marshal() + streamResponse, err := stream.Recv() if err != nil { t.Fatal(err) } - err = l13.Unmarshal(b2) + require.Equal(t, &proto12.QueryStreamResponse{ + Timeseries: []proto12.TimeSeriesChunk{ + proto12.TimeSeriesChunk{ + FromIngesterId: "1", + UserId: "2", + Labels: []proto12.LabelAdapter{ + proto12.LabelAdapter{Name: "foo", Value: 
"barr"}, + }, + Chunks: []proto12.Chunk{ + proto12.Chunk{ + StartTimestampMs: 1, + EndTimestampMs: 1000, + Encoding: 5, + Data: []byte{1, 1, 3, 5, 12}, + }, + }, + }, + }, + }, streamResponse) + stats, err := client.AllUserStats(ctx, &proto12.UserStatsRequest{}) if err != nil { t.Fatal(err) } - require.EqualValues(t, l13, l12) - - t13 := proto13.TimeSeries{ - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "1", Value: "2"}, + require.Equal(t, &proto12.UsersStatsResponse{ + Stats: []*proto12.UserIDStatsResponse{ + &proto12.UserIDStatsResponse{ + UserId: "fake", + Data: &proto12.UserStatsResponse{ + IngestionRate: 1, + NumSeries: 2, + ApiIngestionRate: 2, + RuleIngestionRate: 1.223, + }, + }, }, - Samples: []proto13.Sample{ - proto13.Sample{Value: 10, TimestampMs: 1}, - proto13.Sample{Value: 10000, TimestampMs: 2}, + }, stats) + + m, err := client.MetricsForLabelMatchers(ctx, &proto12.MetricsForLabelMatchersRequest{ + StartTimestampMs: 1, + EndTimestampMs: 2, + MatchersSet: []*proto12.LabelMatchers{ + &proto12.LabelMatchers{ + Matchers: []*proto12.LabelMatcher{ + &proto12.LabelMatcher{ + Type: proto12.REGEX_MATCH, + Name: "foo", + Value: "buzz", + }, + }, + }, }, - } - bt13, err := t13.Marshal() - if err != nil { - t.Fatal(err) - } - t12 := proto12.TimeSeries{} - err = t12.Unmarshal(bt13) + }) if err != nil { t.Fatal(err) } - require.EqualValues(t, t13, t12) + require.Equal(t, &proto12.MetricsForLabelMatchersResponse{ + Metric: []*proto12.Metric{ + &proto12.Metric{Labels: []proto12.LabelAdapter{ + proto12.LabelAdapter{Name: "foo", Value: "bar"}, + }}, + }, + }, m) - bt12, err := t12.Marshal() + chunkStream, err := client.TransferChunks(ctx) if err != nil { t.Fatal(err) } - t13 = proto13.TimeSeries{} - err = t13.Unmarshal(bt12) + wg.Add(1) + err = chunkStream.Send(&proto12.TimeSeriesChunk{ + FromIngesterId: "foo", + UserId: "fake", + Labels: []proto12.LabelAdapter{ + proto12.LabelAdapter{Name: "bar", Value: "buzz"}, + }, + Chunks: []proto12.Chunk{ + 
proto12.Chunk{ + StartTimestampMs: 1, + EndTimestampMs: 2, + Encoding: 3, + Data: []byte{1, 23, 24, 43, 4, 123}, + }, + }, + }) if err != nil { t.Fatal(err) } - require.EqualValues(t, t13, t12) + wg.Wait() + err = stream.CloseSend() + require.NoError(t, err) + tsStream, err := client.TransferTSDB(ctx) + require.NoError(t, err) + wg.Add(1) + err = tsStream.Send(&proto12.TimeSeriesFile{ + FromIngesterId: "string", + UserId: "fake", + Filename: "here", + Data: []byte{1, 2, 3, 5, 6, 7}, + }) + require.NoError(t, err) + wg.Wait() + require.NoError(t, tsStream.CloseSend()) } type fakeIngesterServer struct { @@ -267,20 +331,107 @@ func (f *fakeIngesterServer) Query(_ context.Context, req *proto13.QueryRequest) }, }, nil } -func (fakeIngesterServer) QueryStream(*proto13.QueryRequest, proto13.Ingester_QueryStreamServer) error { +func (f *fakeIngesterServer) QueryStream(req *proto13.QueryRequest, stream proto13.Ingester_QueryStreamServer) error { + require.Equal(f.t, &proto13.QueryRequest{ + StartTimestampMs: 1111, + EndTimestampMs: 20000, + Matchers: []*proto13.LabelMatcher{ + &proto13.LabelMatcher{Type: proto13.REGEX_MATCH, Name: "foo", Value: "bar"}, + &proto13.LabelMatcher{Type: proto13.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, + }, + }, req) + stream.Send(&proto13.QueryStreamResponse{ + Timeseries: []proto13.TimeSeriesChunk{ + proto13.TimeSeriesChunk{ + FromIngesterId: "1", + UserId: "2", + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "foo", Value: "barr"}, + }, + Chunks: []proto13.Chunk{ + proto13.Chunk{ + StartTimestampMs: 1, + EndTimestampMs: 1000, + Encoding: 5, + Data: []byte{1, 1, 3, 5, 12}, + }, + }, + }, + }, + }) return nil } func (fakeIngesterServer) AllUserStats(context.Context, *proto13.UserStatsRequest) (*proto13.UsersStatsResponse, error) { - return nil, nil + return &proto13.UsersStatsResponse{ + Stats: []*proto13.UserIDStatsResponse{ + &proto13.UserIDStatsResponse{ + UserId: "fake", + Data: &proto13.UserStatsResponse{ + IngestionRate: 1, + 
NumSeries: 2, + ApiIngestionRate: 2, + RuleIngestionRate: 1.223, + }, + }, + }, + }, nil } -func (fakeIngesterServer) MetricsForLabelMatchers(context.Context, *proto13.MetricsForLabelMatchersRequest) (*proto13.MetricsForLabelMatchersResponse, error) { - return nil, nil +func (f *fakeIngesterServer) MetricsForLabelMatchers(_ context.Context, req *proto13.MetricsForLabelMatchersRequest) (*proto13.MetricsForLabelMatchersResponse, error) { + require.Equal(f.t, &proto13.MetricsForLabelMatchersRequest{ + StartTimestampMs: 1, + EndTimestampMs: 2, + MatchersSet: []*proto13.LabelMatchers{ + &proto13.LabelMatchers{ + Matchers: []*proto13.LabelMatcher{ + &proto13.LabelMatcher{ + Type: proto13.REGEX_MATCH, + Name: "foo", + Value: "buzz", + }, + }, + }, + }, + }, req) + return &proto13.MetricsForLabelMatchersResponse{ + Metric: []*proto13.Metric{ + &proto13.Metric{Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "foo", Value: "bar"}, + }}, + }, + }, nil } -func (fakeIngesterServer) TransferChunks(proto13.Ingester_TransferChunksServer) error { +func (f *fakeIngesterServer) TransferChunks(s proto13.Ingester_TransferChunksServer) error { + c, err := s.Recv() + require.NoError(f.t, err) + require.Equal(f.t, &proto13.TimeSeriesChunk{ + FromIngesterId: "foo", + UserId: "fake", + Labels: []proto13.LabelAdapter{ + proto13.LabelAdapter{Name: "bar", Value: "buzz"}, + }, + Chunks: []proto13.Chunk{ + proto13.Chunk{ + StartTimestampMs: 1, + EndTimestampMs: 2, + Encoding: 3, + Data: []byte{1, 23, 24, 43, 4, 123}, + }, + }, + }, c) + wg.Done() return nil } // TransferTSDB transfers all files of a tsdb to a joining ingester -func (fakeIngesterServer) TransferTSDB(proto13.Ingester_TransferTSDBServer) error { +func (f *fakeIngesterServer) TransferTSDB(stream proto13.Ingester_TransferTSDBServer) error { + file, err := stream.Recv() + require.NoError(f.t, err) + require.Equal(f.t, &proto13.TimeSeriesFile{ + FromIngesterId: "string", + UserId: "fake", + Filename: "here", + Data: 
[]byte{1, 2, 3, 5, 6, 7}, + }, file) + wg.Done() return nil } From 8ae691fff89c001108d1eef114799305ee53f144 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Sat, 1 Feb 2020 14:09:49 -0500 Subject: [PATCH 06/20] Move weavework/common to gogoproto 1.3 fork. Signed-off-by: Cyril Tovena --- go.mod | 2 + go.sum | 3 + vendor/github.com/google/btree/btree_mem.go | 76 + vendor/github.com/lib/pq/oid/gen.go | 93 + .../miekg/dns/duplicate_generate.go | 144 + vendor/github.com/miekg/dns/msg_generate.go | 328 + vendor/github.com/miekg/dns/types_generate.go | 285 + .../minio/minio-go/v6/functional_tests.go | 10247 ++++++++++++++++ .../alertmanager/asset/asset_generate.go | 38 + .../weaveworks/common/httpgrpc/httpgrpc.pb.go | 525 +- .../weaveworks/common/httpgrpc/httpgrpc.proto | 2 +- vendor/golang.org/x/net/internal/iana/gen.go | 383 + .../x/net/internal/socket/defs_aix.go | 38 + .../x/net/internal/socket/defs_darwin.go | 36 + .../x/net/internal/socket/defs_dragonfly.go | 36 + .../x/net/internal/socket/defs_freebsd.go | 36 + .../x/net/internal/socket/defs_linux.go | 40 + .../x/net/internal/socket/defs_netbsd.go | 38 + .../x/net/internal/socket/defs_openbsd.go | 36 + .../x/net/internal/socket/defs_solaris.go | 36 + vendor/golang.org/x/net/ipv4/defs_aix.go | 39 + vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 + .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 + vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 + vendor/golang.org/x/net/ipv4/defs_linux.go | 122 + vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 + vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 + vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 + vendor/golang.org/x/net/ipv4/gen.go | 199 + vendor/golang.org/x/net/ipv6/defs_aix.go | 82 + vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 + .../golang.org/x/net/ipv6/defs_dragonfly.go | 82 + vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 + vendor/golang.org/x/net/ipv6/defs_linux.go | 147 + vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 + 
vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 + vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 + vendor/golang.org/x/net/ipv6/gen.go | 199 + vendor/golang.org/x/net/publicsuffix/gen.go | 717 ++ vendor/golang.org/x/sys/unix/mkasm_darwin.go | 78 + vendor/golang.org/x/sys/unix/mkpost.go | 122 + vendor/golang.org/x/sys/unix/mksyscall.go | 402 + .../x/sys/unix/mksyscall_aix_ppc.go | 415 + .../x/sys/unix/mksyscall_aix_ppc64.go | 614 + .../x/sys/unix/mksyscall_solaris.go | 335 + .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 + vendor/golang.org/x/sys/unix/mksysnum.go | 190 + vendor/golang.org/x/sys/unix/types_aix.go | 237 + vendor/golang.org/x/sys/unix/types_darwin.go | 283 + .../golang.org/x/sys/unix/types_dragonfly.go | 263 + vendor/golang.org/x/sys/unix/types_freebsd.go | 400 + vendor/golang.org/x/sys/unix/types_netbsd.go | 290 + vendor/golang.org/x/sys/unix/types_openbsd.go | 283 + vendor/golang.org/x/sys/unix/types_solaris.go | 266 + vendor/golang.org/x/text/unicode/bidi/gen.go | 133 + .../x/text/unicode/bidi/gen_ranges.go | 57 + .../x/text/unicode/bidi/gen_trieval.go | 64 + .../x/text/unicode/norm/maketables.go | 986 ++ .../golang.org/x/text/unicode/norm/triegen.go | 117 + vendor/golang.org/x/text/width/gen.go | 115 + vendor/golang.org/x/text/width/gen_common.go | 96 + vendor/golang.org/x/text/width/gen_trieval.go | 34 + .../x/tools/go/gcexportdata/main.go | 99 + .../x/tools/internal/imports/mkindex.go | 173 + .../x/tools/internal/imports/mkstdlib.go | 128 + .../grpc/test/bufconn/bufconn.go | 308 + vendor/modules.txt | 631 +- 67 files changed, 21836 insertions(+), 495 deletions(-) create mode 100644 vendor/github.com/google/btree/btree_mem.go create mode 100644 vendor/github.com/lib/pq/oid/gen.go create mode 100644 vendor/github.com/miekg/dns/duplicate_generate.go create mode 100644 vendor/github.com/miekg/dns/msg_generate.go create mode 100644 vendor/github.com/miekg/dns/types_generate.go create mode 100644 
vendor/github.com/minio/minio-go/v6/functional_tests.go create mode 100644 vendor/github.com/prometheus/alertmanager/asset/asset_generate.go create mode 100644 vendor/golang.org/x/net/internal/iana/gen.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_aix.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_aix.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/gen.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_aix.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/gen.go create mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go create mode 
100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/mkpost.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go create mode 100644 vendor/golang.org/x/sys/unix/types_aix.go create mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 vendor/golang.org/x/text/width/gen.go create mode 100644 vendor/golang.org/x/text/width/gen_common.go create mode 100644 vendor/golang.org/x/text/width/gen_trieval.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mkindex.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mkstdlib.go create mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go diff --git a/go.mod b/go.mod index 3f247cd747..000e3a8ce6 100644 --- a/go.mod +++ b/go.mod @@ -83,3 +83,5 @@ replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110 // Override reference that 
causes an error from Go proxy - see https://github.com/golang/go/issues/33558 replace k8s.io/client-go => k8s.io/client-go v0.0.0-20190620085101-78d2af792bab + +replace github.com/weaveworks/common => github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 diff --git a/go.sum b/go.sum index 774c56a35b..d4549234e8 100644 --- a/go.sum +++ b/go.sum @@ -160,6 +160,8 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 h1:FGV8BkEkS0ah3fN5DJR3VnWb2XVaVwTPiVdPM5QrfqI= +github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= @@ -313,6 +315,7 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf 
v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/status v1.0.3 h1:WkVBY59mw7qUNTr/bLwO7J2vesJ0rQ2C3tMXrTd3w5M= diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go new file mode 100644 index 0000000000..cb95b7fa1b --- /dev/null +++ b/vendor/github.com/google/btree/btree_mem.go @@ -0,0 +1,76 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +// This binary compares memory usage between btree and gollrb. 
+package main + +import ( + "flag" + "fmt" + "math/rand" + "runtime" + "time" + + "github.com/google/btree" + "github.com/petar/GoLLRB/llrb" +) + +var ( + size = flag.Int("size", 1000000, "size of the tree to build") + degree = flag.Int("degree", 8, "degree of btree") + gollrb = flag.Bool("llrb", false, "use llrb instead of btree") +) + +func main() { + flag.Parse() + vals := rand.Perm(*size) + var t, v interface{} + v = vals + var stats runtime.MemStats + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- BEFORE ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + start := time.Now() + if *gollrb { + tr := llrb.New() + for _, v := range vals { + tr.ReplaceOrInsert(llrb.Int(v)) + } + t = tr // keep it around + } else { + tr := btree.New(*degree) + for _, v := range vals { + tr.ReplaceOrInsert(btree.Int(v)) + } + t = tr // keep it around + } + fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) + fmt.Println("-------- AFTER ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- AFTER GC ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + if t == v { + fmt.Println("to make sure vals and tree aren't GC'd") + } +} diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go new file mode 100644 index 0000000000..7c634cdc5c --- /dev/null +++ b/vendor/github.com/lib/pq/oid/gen.go @@ -0,0 +1,93 @@ +// +build ignore + +// Generate the table of OID values +// Run with 'go run gen.go'. +package main + +import ( + "database/sql" + "fmt" + "log" + "os" + "os/exec" + "strings" + + _ "github.com/lib/pq" +) + +// OID represent a postgres Object Identifier Type. +type OID struct { + ID int + Type string +} + +// Name returns an upper case version of the oid type. 
+func (o OID) Name() string { + return strings.ToUpper(o.Type) +} + +func main() { + datname := os.Getenv("PGDATABASE") + sslmode := os.Getenv("PGSSLMODE") + + if datname == "" { + os.Setenv("PGDATABASE", "pqgotest") + } + + if sslmode == "" { + os.Setenv("PGSSLMODE", "disable") + } + + db, err := sql.Open("postgres", "") + if err != nil { + log.Fatal(err) + } + rows, err := db.Query(` + SELECT typname, oid + FROM pg_type WHERE oid < 10000 + ORDER BY oid; + `) + if err != nil { + log.Fatal(err) + } + oids := make([]*OID, 0) + for rows.Next() { + var oid OID + if err = rows.Scan(&oid.Type, &oid.ID); err != nil { + log.Fatal(err) + } + oids = append(oids, &oid) + } + if err = rows.Err(); err != nil { + log.Fatal(err) + } + cmd := exec.Command("gofmt") + cmd.Stderr = os.Stderr + w, err := cmd.StdinPipe() + if err != nil { + log.Fatal(err) + } + f, err := os.Create("types.go") + if err != nil { + log.Fatal(err) + } + cmd.Stdout = f + err = cmd.Start() + if err != nil { + log.Fatal(err) + } + fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.") + fmt.Fprintln(w, "\npackage oid") + fmt.Fprintln(w, "const (") + for _, oid := range oids { + fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) + } + fmt.Fprintln(w, ")") + fmt.Fprintln(w, "var TypeName = map[Oid]string{") + for _, oid := range oids { + fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) + } + fmt.Fprintln(w, "}") + w.Close() + cmd.Wait() +} diff --git a/vendor/github.com/miekg/dns/duplicate_generate.go b/vendor/github.com/miekg/dns/duplicate_generate.go new file mode 100644 index 0000000000..9b7a71b16e --- /dev/null +++ b/vendor/github.com/miekg/dns/duplicate_generate.go @@ -0,0 +1,144 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. 
The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" +) + +var packageHdr = ` +// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. + +package dns + +` + +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + + if name == "PrivateRR" || name == "OPT" { + continue + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate the duplicate check for each type. 
+ fmt.Fprint(b, "// isDuplicate() functions\n\n") + for _, name := range namedTypes { + + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name) + fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name) + fmt.Fprint(b, "if !ok { return false }\n") + fmt.Fprint(b, "_ = r2\n") + for i := 1; i < st.NumFields(); i++ { + field := st.Field(i).Name() + o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) } + o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) } + + // For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are + // *indirectly* defined as a slice in the net package). + if _, ok := st.Field(i).Type().(*types.Slice); ok { + o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}") + + if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` { + o3(`for i := 0; i < len(r1.%s); i++ { + if !isDuplicateName(r1.%s[i], r2.%s[i]) { + return false + } + }`) + + continue + } + + o3(`for i := 0; i < len(r1.%s); i++ { + if r1.%s[i] != r2.%s[i] { + return false + } + }`) + + continue + } + + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"a"`, `dns:"aaaa"`: + o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}") + case `dns:"cdomain-name"`, `dns:"domain-name"`: + o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}") + default: + o2("if r1.%s != r2.%s {\nreturn false\n}") + } + } + fmt.Fprintf(b, "return true\n}\n\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zduplicate.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go new file mode 100644 index 0000000000..721a0fce32 --- /dev/null 
+++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -0,0 +1,328 @@ +//+build ignore + +// msg_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate pack/unpack methods based on the struct tags. The generated source is +// written to zmsg.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" +) + +var packageHdr = ` +// Code generated by "go run msg_generate.go"; DO NOT EDIT. + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := 
&bytes.Buffer{} + b.WriteString(packageHdr) + + fmt.Fprint(b, "// pack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("off, err = packStringTxt(rr.%s, msg, off)\n") + case `dns:"opt"`: + o("off, err = packDataOpt(rr.%s, msg, off)\n") + case `dns:"nsec"`: + o("off, err = packDataNsec(rr.%s, msg, off)\n") + case `dns:"domain-name"`: + o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n") + case st.Tag(i) == `dns:"domain-name"`: + o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n") + case st.Tag(i) == `dns:"a"`: + o("off, err = packDataA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"aaaa"`: + o("off, err = packDataAAAA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"uint48"`: + o("off, err = packUint48(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"txt"`: + o("off, err = packString(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 + fallthrough + case st.Tag(i) == `dns:"base32"`: + o("off, err = packStringBase32(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("off, err = 
packStringBase64(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): + // directly write instead of using o() so we get the error check in the correct place + field := st.Field(i).Name() + fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty +if rr.%s != "-" { + off, err = packStringHex(rr.%s, msg, off) + if err != nil { + return off, err + } +} +`, field, field) + continue + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("off, err = packStringHex(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"any"`: + o("off, err = packStringAny(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"octet"`: + o("off, err = packStringOctet(rr.%s, msg, off)\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("off, err = packUint8(rr.%s, msg, off)\n") + case types.Uint16: + o("off, err = packUint16(rr.%s, msg, off)\n") + case types.Uint32: + o("off, err = packUint32(rr.%s, msg, off)\n") + case types.Uint64: + o("off, err = packUint64(rr.%s, msg, off)\n") + case types.String: + o("off, err = packString(rr.%s, msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintln(b, "return off, nil }\n") + } + + fmt.Fprint(b, "// unpack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name) + fmt.Fprint(b, `rdStart := off +_ = rdStart + +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + // size-* are special, because they reference a struct member we should use for the length. 
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) { + structMember := structMember(st.Tag(i)) + structTag := structTag(st.Tag(i)) + switch structTag { + case "hex": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base32": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base64": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + continue + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("rr.%s, off, err = unpackStringTxt(msg, off)\n") + case `dns:"opt"`: + o("rr.%s, off, err = unpackDataOpt(msg, off)\n") + case `dns:"nsec"`: + o("rr.%s, off, err = unpackDataNsec(msg, off)\n") + case `dns:"domain-name"`: + o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"cdomain-name"`: + fallthrough + case `dns:"domain-name"`: + o("rr.%s, off, err = UnpackDomainName(msg, off)\n") + case `dns:"a"`: + o("rr.%s, off, err = unpackDataA(msg, off)\n") + case `dns:"aaaa"`: + o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") + case `dns:"uint48"`: + o("rr.%s, off, err = unpackUint48(msg, off)\n") + case `dns:"txt"`: + o("rr.%s, off, err = unpackString(msg, off)\n") + case `dns:"base32"`: + o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"base64"`: + o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"hex"`: + o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + 
int(rr.Hdr.Rdlength))\n") + case `dns:"any"`: + o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"octet"`: + o("rr.%s, off, err = unpackStringOctet(msg, off)\n") + case "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("rr.%s, off, err = unpackUint8(msg, off)\n") + case types.Uint16: + o("rr.%s, off, err = unpackUint16(msg, off)\n") + case types.Uint32: + o("rr.%s, off, err = unpackUint32(msg, off)\n") + case types.Uint64: + o("rr.%s, off, err = unpackUint64(msg, off)\n") + case types.String: + o("rr.%s, off, err = unpackString(msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + // If we've hit len(msg) we return without error. + if i < st.NumFields()-1 { + fmt.Fprintf(b, `if off == len(msg) { +return off, nil + } +`) + } + } + fmt.Fprintf(b, "return off, nil }\n\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zmsg.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. +func structMember(s string) string { + fields := strings.Split(s, ":") + if len(fields) == 0 { + return "" + } + f := fields[len(fields)-1] + // f should have a closing " + if len(f) > 1 { + return f[:len(f)-1] + } + return f +} + +// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. 
+func structTag(s string) string { + fields := strings.Split(s, ":") + if len(fields) < 2 { + return "" + } + return fields[1][len("\"size-"):] +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go new file mode 100644 index 0000000000..8cda2a74c5 --- /dev/null +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -0,0 +1,285 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +var skipLen = map[string]struct{}{ + "NSEC": {}, + "NSEC3": {}, + "OPT": {}, + "CSYNC": {}, +} + +var packageHdr = ` +// Code generated by "go run types_generate.go"; DO NOT EDIT. + +package dns + +import ( + "encoding/base64" + "net" +) + +` + +var TypeToRR = template.Must(template.New("TypeToRR").Parse(` +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ +{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, +{{end}}{{end}} } + +`)) + +var typeToString = template.Must(template.New("typeToString").Parse(` +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ +{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", +{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", +} + +`)) + +var headerFunc = template.Must(template.New("headerFunc").Parse(` +{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } +{{end}} + +`)) + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect constants like TypeX + var numberedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + b, ok := o.Type().(*types.Basic) + if !ok || b.Kind() != types.Uint16 { + continue + } + if !strings.HasPrefix(o.Name(), "Type") { + continue + } + name := strings.TrimPrefix(o.Name(), "Type") + if name == "PrivateRR" { + continue + } + numberedTypes = append(numberedTypes, name) + } + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does 
not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate TypeToRR + fatalIfErr(TypeToRR.Execute(b, namedTypes)) + + // Generate typeToString + fatalIfErr(typeToString.Execute(b, numberedTypes)) + + // Generate headerFunc + fatalIfErr(headerFunc.Execute(b, namedTypes)) + + // Generate len() + fmt.Fprint(b, "// len() functions\n") + for _, name := range namedTypes { + if _, ok := skipLen[name]; ok { + continue + } + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name) + fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n") + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"cdomain-name"`: + o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n") + case `dns:"domain-name"`: + o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n") + case `dns:"txt"`: + o("for _, x := range rr.%s { l += len(x) + 1 }\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: + // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + o("l += domainNameLen(rr.%s, off+l, compression, true)\n") + case st.Tag(i) == `dns:"domain-name"`: + o("l += domainNameLen(rr.%s, off+l, compression, false)\n") + case st.Tag(i) == `dns:"octet"`: + o("l += len(rr.%s)\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored + o("l += len(rr.%s)/2\n") + case st.Tag(i) 
== `dns:"hex"`: + o("l += len(rr.%s)/2\n") + case st.Tag(i) == `dns:"any"`: + o("l += len(rr.%s)\n") + case st.Tag(i) == `dns:"a"`: + o("if len(rr.%s) != 0 { l += net.IPv4len }\n") + case st.Tag(i) == `dns:"aaaa"`: + o("if len(rr.%s) != 0 { l += net.IPv6len }\n") + case st.Tag(i) == `dns:"txt"`: + o("for _, t := range rr.%s { l += len(t) + 1 }\n") + case st.Tag(i) == `dns:"uint48"`: + o("l += 6 // %s\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("l++ // %s\n") + case types.Uint16: + o("l += 2 // %s\n") + case types.Uint32: + o("l += 4 // %s\n") + case types.Uint64: + o("l += 8 // %s\n") + case types.String: + o("l += len(rr.%s) + 1\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintf(b, "return l }\n") + } + + // Generate copy() + fmt.Fprint(b, "// copy() functions\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) + fields := []string{"rr.Hdr"} + for i := 1; i < st.NumFields(); i++ { + f := st.Field(i).Name() + if sl, ok := st.Field(i).Type().(*types.Slice); ok { + t := sl.Underlying().String() + t = strings.TrimPrefix(t, "[]") + if strings.Contains(t, ".") { + splits := strings.Split(t, ".") + t = splits[len(splits)-1] + } + // For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element. 
+ if t == "EDNS0" { + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + if st.Field(i).Type().String() == "net.IP" { + fields = append(fields, "copyIP(rr."+f+")") + continue + } + fields = append(fields, "rr."+f) + } + fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) + fmt.Fprintf(b, "}\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("ztypes.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/minio/minio-go/v6/functional_tests.go b/vendor/github.com/minio/minio-go/v6/functional_tests.go new file mode 100644 index 0000000000..fb66f06fe3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/functional_tests.go @@ -0,0 +1,10247 @@ +// +build ignore + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "mime/multipart" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/dustin/go-humanize" + log "github.com/sirupsen/logrus" + + "github.com/minio/minio-go/v6" + "github.com/minio/minio-go/v6/pkg/encrypt" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +var dataFileMap = map[string]int{ + "datafile-1-b": 1, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, +} + +func isFullMode() bool { + return os.Getenv("MINT_MODE") == "full" +} + +func getFuncName() string { + pc, _, _, _ := runtime.Caller(1) + return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") +} + +// Tests bucket re-create errors. 
+func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // skipping region functional tests for non s3 runs + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, region); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket Failed", err) + return + } + if err = c.MakeBucket(bucketName, region); err == nil { + logError(testName, function, args, startTime, "", "Bucket already exists", err) + return + } + // Verify valid error response from server. 
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +func testMetadataSizeLimit() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, 
minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // skipping region functional tests for non s3 runs + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, region); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(bucketName+".withperiod", region); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Get Object failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat Object failed", err) + return + } + if st.Size != 
int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content types don't match", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectWithMetadata() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object custom metadata + customContentType := "custom/contenttype" + + args["metadata"] = map[string][]string{ + "Content-Type": {customContentType}, + "X-Amz-Meta-CustomKey": {"extra spaces in value"}, + } + + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: customContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", 
"ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testPutObjectWithContentLanguage() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + data := bytes.Repeat([]byte("a"), int(0)) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != 0 { + logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err) + return + } + + objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := bytes.Repeat([]byte("a"), int(size)) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if n != size { + logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) + return + } + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object seeker from the end, using whence set to '2'. +func testGetObjectSeekEnd() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + 
logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return + } + + pos, err := r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + buf2 := make([]byte, 100) + m, err := io.ReadFull(r, buf2) + if err != nil { + logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) + return + } + hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) + hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) + if hexBuf1 != hexBuf2 { + logError(testName, function, args, startTime, "", "Values at same index dont match", err) + return + } + pos, err = r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + if err = r.Close(); err != nil { + logError(testName, function, args, startTime, "", "ObjectClose failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwice() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not 
match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Object Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test RemoveObjectsWithContext request context cancels after timeout
+func testRemoveObjectsWithContext() {
+	// Initialize logging params.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		// Bail out: every subsequent call would fail against a missing bucket.
+		return
+	}
+
+	// Generate put data. bytes.Reader is an io.ReaderAt, so it is safe to
+	// reuse across PutObject calls (each upload reads from offset 0).
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 20 objects.
+	nrObjects := 20
+	objectsCh := make(chan string)
+	go func() {
+		defer close(objectsCh)
+		for i := 0; i < nrObjects; i++ {
+			objectName := "sample" + strconv.Itoa(i) + ".txt"
+			_, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+			if err != nil {
+				logError(testName, function, args, startTime, "", "PutObject failed", err)
+				continue
+			}
+			objectsCh <- objectName
+		}
+	}()
+	// Set context to cancel in 1 nanosecond.
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Call RemoveObjectsWithContext API with short timeout.
+	errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
+	// Check for error.
+	select {
+	case r := <-errorCh:
+		if r.Err == nil {
+			logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err)
+			return
+		}
+	}
+	// Set context with longer timeout.
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+	defer cancel()
+	// The first call consumed and closed objectsCh, so re-feed the same
+	// object names through a fresh channel; otherwise this phase removes
+	// nothing and the success check below is vacuous.
+	objectsCh1 := make(chan string)
+	go func() {
+		defer close(objectsCh1)
+		for i := 0; i < nrObjects; i++ {
+			objectsCh1 <- "sample" + strconv.Itoa(i) + ".txt"
+		}
+	}()
+	// Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed.
+	errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh1)
+	select {
+	case r, more := <-errorCh:
+		if more || r.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+			return
+		}
+	}
+
+	// Delete all objects and buckets.
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveObjects(bucketName, objectsCh)"
+	args := map[string]interface{}{
+		"bucketName": "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+	// Multi remove of 200 objects
+	nrObjects := 200
+
+	objectsCh := make(chan string)
+
+	go func() {
+		defer close(objectsCh)
+		// Upload objects and send them to objectsCh
+		for i := 0; i < nrObjects; i++ {
+			objectName := "sample" + strconv.Itoa(i) + ".txt"
+			_, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+			if err != nil {
+				logError(testName, function, args, startTime, "", "PutObject failed", err)
+				continue
+			}
+			objectsCh <- objectName
+		}
+	}()
+
+	// Call RemoveObjects API
+	errorCh := c.RemoveObjects(bucketName, objectsCh)
+
+	// Check if errorCh doesn't receive any error
+	select {
+	case r, more := <-errorCh:
+		if more {
+			logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+			return
+		}
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject of a big file to trigger
multipart +func testFPutObjectMultipart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. + var fileName = getMintDataDirFilePath("datafile-129-MB") + if fileName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
+ if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + fileName = file.Name() + args["fileName"] = fileName + } + totalSize := dataFileMap["datafile-129-MB"] + // Set base object name + objectName := bucketName + "FPutObject" + "-standard" + args["objectName"] = objectName + + objectContentType := "testapplication/octet-stream" + args["objectContentType"] = objectContentType + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + if n != int64(totalSize) { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", err) + return + } + if objInfo.Size != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) + return + } + if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType doesn't match", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, 
function, args, startTime).Info() +} + +// Tests FPutObject with null contentType (default = application/octet-stream) +func testFPutObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + + // Make a new bucket. + args["bucketName"] = bucketName + args["location"] = location + function = "MakeBucket()bucketName, location" + err = c.MakeBucket(bucketName, location) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-129-MB") + if fName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
+ if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + totalSize := dataFileMap["datafile-129-MB"] + + // Set base object name + function = "FPutObject(bucketName, objectName, fileName, opts)" + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + "-standard" + args["fileName"] = fName + args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + if n != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + if n != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return + } + srcFile, err := os.Open(fName) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + defer srcFile.Close() + // Add extension to temp file name + tmpFile, err := 
os.Create(fName + ".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "File create failed", err) + return + } + defer tmpFile.Close() + _, err = io.Copy(tmpFile, srcFile) + if err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-GTar" + args["opts"] = minio.PutObjectOptions{} + n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + if n != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return + } + + // Check headers + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-standard" + rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rStandard.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) + return + } + + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-Octet" + rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rOctet.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) + return + } + + function = "StatObject(bucketName, objectName, opts)" + 
args["objectName"] = objectName + "-GTar" + rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + if err = os.Remove(fName + ".gtar"); err != nil { + logError(testName, function, args, startTime, "", "File remove failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObjectWithContext request context cancels after timeout +func testFPutObjectWithContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + totalSize := dataFileMap["datafile-1-MB"] + + // Set base object name + objectName := bucketName + "FPutObjectWithContext" + args["objectName"] = objectName + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObjectWithContext with a long timeout. 
Expect the put object to succeed + n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err) + return + } + if n != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return + } + + _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Tests FPutObjectWithContext request context cancels after timeout +func testFPutObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "Temp file creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + // Close the file pro-actively for windows. 
+ if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + totalSize := dataFileMap["datafile-1-MB"] + + // Set base object name + objectName := bucketName + "FPutObjectWithContext" + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObjectWithContext with a long timeout. 
Expect the put object to succeed + n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err) + return + } + if n != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err) + return + } + + _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectWithContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + defer cancel() + + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + }() + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + // Generic seek error for 
errors other than io.EOF + seekErr := errors.New("seek error") + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, seekErr, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, seekErr, false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + // We expect an error + if testCase.err == seekErr && err == nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // We expect a specific error + if testCase.err != seekErr && testCase.err != err { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // If we expect an error go to the next loop + if testCase.err != nil { + continue + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked 
does not match, expected "+string(testCase.pos)+", got "+string(n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + successLogger(testName, function, args, startTime).Info() +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. 
+ m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", 
"ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Reproduces issue https://github.com/minio/minio-go/issues/1137 +func testGetObjectReadAtWhenEOFWasReached() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read directly + buf1 := make([]byte, n) + buf2 := make([]byte, 512) + + m, err := r.Read(buf1) + if err != nil { + if err != io.EOF { + logError(testName, function, 
args, startTime, "", "Read failed", err) + return + } + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf) { + logError(testName, function, args, startTime, "", "Incorrect count of Read data", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, 512) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[512:1024]) { + logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test Presigned Post Policy +func testPresignedPostPolicy() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) + return + } + + policy := minio.NewPostPolicy() + + if err := policy.SetBucket(""); err == nil { + logError(testName, function, args, startTime, 
"", "SetBucket did not fail for invalid conditions", err) + return + } + if err := policy.SetKey(""); err == nil { + logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) + return + } + if err := policy.SetKeyStartsWith(""); err == nil { + logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) + return + } + if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { + logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) + return + } + if err := policy.SetContentType(""); err == nil { + logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) + return + } + if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { + logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) + return + } + if err := policy.SetUserMetadata("", ""); err == nil { + logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) + return + } + + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) + } + + // Get a 33KB file to upload and test if set post policy works + var filePath = getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp 
file with 33 KB data. + file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + // make post request with correct form data + res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." 
+ os.Getenv(serverEndpoint) + "/" + objectName + + if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { + logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) + return + } + } else { + logError(testName, function, args, startTime, "", "Location not found in header response", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(dst, src)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(bucketName+"-copy", "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. 
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	// Close the data source like the sibling tests do.
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// fmt.Sprintf, not string(int): string() on an integer yields a rune.
+	if n != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+", got "+fmt.Sprintf("%d", n), err)
+		return
+	}
+
+	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	// Copy Source
+	src := minio.NewSourceInfo(bucketName, objectName, nil)
+	args["src"] = src
+
+	// Set copy conditions.
+
+	// All invalid conditions first.
+	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
+		return
+	}
+	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
+		return
+	}
+	err = src.SetMatchETagCond("")
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
+		return
+	}
+	err = src.SetMatchETagExceptCond("")
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
+		return
+	}
+
+	// NOTE(review): day 0 of April resolves to March 31 — presumably an
+	// arbitrary past date; confirm before changing.
+	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
+		return
+	}
+	err = src.SetMatchETagCond(objInfo.ETag)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
+		return
+	}
+
+	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+	args["dst"] = dst
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	// Perform the Copy
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// Source object
+	r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	// Destination object
+	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", objInfoCopy.Size)+", got "+fmt.Sprintf("%d", objInfo.Size), err)
+		return
+	}
+
+	// Close all the get readers before proceeding with CopyObject operations.
+	r.Close()
+	readerCopy.Close()
+
+	// CopyObject again but with wrong conditions
+	src = minio.NewSourceInfo(bucketName, objectName, nil)
+	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
+		return
+	}
+	err = src.SetMatchETagExceptCond(objInfo.ETag)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
+		return
+	}
+
+	// Perform the Copy which should fail
+	err = c.CopyObject(dst, src)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+		return
+	}
+
+	// Perform the Copy which should update only metadata.
+	src = minio.NewSourceInfo(bucketName, objectName, nil)
+	dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{
+		"Copy": "should be same",
+	})
+	args["dst"] = dst
+	args["src"] = src
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err)
+		return
+	}
+
+	oi, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+
+	stOpts := minio.StatObjectOptions{}
+	stOpts.SetMatchETag(oi.ETag)
+	objInfo, err = c.StatObject(bucketName, objectName, stOpts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err)
+		return
+	}
+
+	if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" {
+		logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+	if err = cleanupBucket(bucketName+"-copy", c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests SSE-C get object ReaderSeeker interface methods.
+func testSSECEncryptedGetObjectReadSeekFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+ c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + }() + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // Read the data back + r, err := 
c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with 
whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderSeeker interface methods. 
+func testSSES3EncryptedGetObjectReadSeekFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer func() {
+		// Delete all objects and buckets
+		if err = cleanupBucket(bucketName, c); err != nil {
+			logError(testName, function, args, startTime, "", "Cleanup failed", err)
+			return
+		}
+	}()
+
+	// Generate 129MiB of data.
+	bufSize := dataFileMap["datafile-129-MB"]
+	var reader = getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.NewSSE(),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// fmt.Sprintf, not string(int): string() on an integer yields a rune.
+	if n != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+", got "+fmt.Sprintf("%d", n), err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat object failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprintf("%d", bufSize)+", got "+fmt.Sprintf("%d", st.Size), err)
+		return
+	}
+
+	// Compares the reader's contents after a seek against the original
+	// buffer. Returns false on mismatch so the caller can abort the test:
+	// a bare return inside the closure would only exit the closure, and
+	// the test would keep running after a detected failure.
+	cmpData := func(r io.Reader, start, end int) bool {
+		if end-start == 0 {
+			return true
+		}
+		buffer := bytes.NewBuffer([]byte{})
+		if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+			if err != io.EOF {
+				logError(testName, function, args, startTime, "", "CopyN failed", err)
+				return false
+			}
+		}
+		if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+			logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+			return false
+		}
+		return true
+	}
+
+	testCases := []struct {
+		offset    int64
+		whence    int
+		pos       int64
+		err       error
+		shouldCmp bool
+		start     int
+		end       int
+	}{
+		// Start from offset 0, fetch data and compare
+		{0, 0, 0, nil, true, 0, 0},
+		// Start from offset 2048, fetch data and compare
+		{2048, 0, 2048, nil, true, 2048, bufSize},
+		// Start from offset larger than possible
+		{int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
+		// Move to offset 0 without comparing
+		{0, 0, 0, nil, false, 0, 0},
+		// Move one step forward and compare
+		{1, 1, 1, nil, true, 1, bufSize},
+		// Move larger than possible
+		{int64(bufSize), 1, 0, io.EOF, false, 0, 0},
+		// Provide negative offset with CUR_SEEK
+		{int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
+		// Test with whence SEEK_END and with positive offset
+		{1024, 2, 0, io.EOF, false, 0, 0},
+		// Test with whence SEEK_END and with negative offset
+		{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+		// Test with whence SEEK_END and with large negative offset
+		{-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
+		// Test with invalid whence
+		{0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
+	}
+
+	for i, testCase := range testCases {
+		// Perform seek operation
+		n, err := r.Seek(testCase.offset, testCase.whence)
+		if err != nil && testCase.err == nil {
+			// We expected success.
+			logError(testName, function, args, startTime, "",
+				fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+			return
+		}
+		if err == nil && testCase.err != nil {
+			// We expected failure, but got success.
+			logError(testName, function, args, startTime, "",
+				fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+			return
+		}
+		if err != nil && testCase.err != nil {
+			if err.Error() != testCase.err.Error() {
+				// We expect a specific error
+				logError(testName, function, args, startTime, "",
+					fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
+				return
+			}
+		}
+		// Check the returned seek pos
+		if n != testCase.pos {
+			logError(testName, function, args, startTime, "",
+				fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
+			return
+		}
+		// Compare only if shouldCmp is activated; abort on mismatch.
+		if testCase.shouldCmp {
+			if !cmpData(r, testCase.start, testCase.end) {
+				return
+			}
+		}
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests SSE-C get object ReaderAt interface methods.
+func testSSECEncryptedGetObjectReadAtFunctional() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. 
+ m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", 
"ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Tests SSE-S3 get object ReaderAt interface methods. +func testSSES3EncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return + } + + // read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) 
+ buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + 
return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// testSSECEncryptionPutGet tests encryption with customer provided encryption keys +func testSSECEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data 
back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(testName, function, args, startTime).Info() + + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// TestEncryptionFPut tests encryption with customer specified encryption keys +func testSSECEncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Generate a random file name. 
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + if err = os.Remove(fileName); err != nil { + logError(testName, function, args, startTime, "", "File remove failed", err) + return + } + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + 
successLogger(testName, function, args, startTime).Info() +} + +// testSSES3EncryptionPutGet tests SSE-S3 encryption +func testSSES3EncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back without any encryption headers + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, 
recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(testName, function, args, startTime).Info() + + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// TestSSES3EncryptionFPut tests server side encryption +func testSSES3EncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Generate a random file name. 
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + if err = os.Remove(fileName); err != nil { + logError(testName, function, args, startTime, "", "File remove failed", err) + return + } + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, 
function, args, startTime).Info() +} + +func testBucketNotification() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketNotification(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + if os.Getenv("NOTIFY_BUCKET") == "" || + os.Getenv("NOTIFY_SERVICE") == "" || + os.Getenv("NOTIFY_REGION") == "" || + os.Getenv("NOTIFY_ACCOUNTID") == "" || + os.Getenv("NOTIFY_RESOURCE") == "" { + ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := minio.NewNotificationConfig(topicArn) + + topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := minio.NewNotificationConfig(queueArn) + queueConfig.AddEvents(minio.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + bNotification := minio.BucketNotification{} + bNotification.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + bNotification.AddTopic(topicConfig) + if len(bNotification.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Duplicate entry added", err) + return + } + + // Add and remove a queue config + bNotification.AddQueue(queueConfig) + bNotification.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(bucketName, bNotification) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) + return + } + + bNotification, err = c.GetBucketNotification(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) + return + } + + if len(bNotification.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Topic config is empty", err) + return + } + + if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) + return + } + + err = c.RemoveAllBucketNotification(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", 
"RemoveAllBucketNotification failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctional()" + functionAll := "" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket. + function = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" + args["bucketName"] = bucketName + err = c.MakeBucket(bucketName, "us-east-1") + + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File creation failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "File write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. 
+ var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Verify if bucket exits and you have access with context. + function = "BucketExistsWithContext(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExistsWithContext(context.Background(), bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "BucketExistsWithContext failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Asserting the default bucket policy. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + nilPolicy, err := c.GetBucketPolicy(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) + return + } + + // Set the bucket policy to 'public readonly'. 
+ function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readOnlyPolicy, + } + + err = c.SetBucketPolicy(bucketName, readOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readonly`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public writeonly'. + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": writeOnlyPolicy, + } + err = c.SetBucketPolicy(bucketName, writeOnlyPolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `writeonly`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + + _, err = c.GetBucketPolicy(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public read/write'. 
+ function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(bucketName, readWritePolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readwrite`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets() + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // List all buckets with context. + function = "ListBucketsWithContext()" + functionAll += ", " + function + args = nil + buckets, err = c.ListBucketsWithContext(context.Background()) + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBucketsWithContext failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. 
+ if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) + return + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + objFound = false + isRecursive = true // Recursive is true. + function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) 
+ return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) + return + } + newReader.Close() + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + // Verify if presigned url works. 
+ resp, err := http.Head(presignedHeadURL.String()) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + resp.Body.Close() + + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedGetObject success", err) + return + } + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. 
+ resp, err = http.Get(presignedGetURL.String()) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + "reqParams": reqParams, + } + presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + // Verify if presigned url works. 
+ resp, err = http.Get(presignedGetURL.String()) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) + return + } + if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedPutObject success", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + buf = bytes.Repeat([]byte("g"), 1<<19) + + req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, 
startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) + return + } + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively cancelled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: http.DefaultTransport, + } + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "RemoveObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + err = c.RemoveObject(bucketName, objectName) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + args["objectName"] = objectName + "-f" + err = c.RemoveObject(bucketName, objectName+"-f") + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-nolength" + err = c.RemoveObject(bucketName, objectName+"-nolength") + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presigned" + err = c.RemoveObject(bucketName, objectName+"-presigned") + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + 
return + } + + function = "RemoveBucket(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + err = c.RemoveBucket(bucketName) + + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + err = c.RemoveBucket(bucketName) + if err == nil { + logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) + return + } + if err.Error() != "The specified bucket does not exist" { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + + if err = os.Remove(fileName); err != nil { + logError(testName, function, args, startTime, "", "File Remove failed", err) + return + } + if err = os.Remove(fileName + "-f"); err != nil { + logError(testName, function, args, startTime, "", "File Remove failed", err) + return + } + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test for validating GetObject Reader* methods functioning when the +// object is modified in the object store. +func testGetObjectModified() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer c.RemoveBucket(bucketName) + + // Upload an object. + objectName := "myobject" + args["objectName"] = objectName + content := "helloworld" + _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + defer c.RemoveObject(bucketName, objectName) + + reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. 
+ _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold" + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject to upload a file seeked at a given offset. +func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer c.RemoveBucket(bucketName) + + var tempfile *os.File + + if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { + tempfile, err = os.Open(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + args["fileToUpload"] = fileName + } else { + tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile create failed", err) + return + } + args["fileToUpload"] = tempfile.Name() + + // Generate 100kB data + if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + defer os.Remove(tempfile.Name()) + + // Seek back to the beginning of the file. 
+ tempfile.Seek(0, 0) + } + var length = 100 * humanize.KiByte + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + offset := length / 2 + if _, err = tempfile.Seek(int64(offset), 0); err != nil { + logError(testName, function, args, startTime, "", "TempFile seek failed", err) + return + } + + n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if n != int64(length-offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err) + return + } + tempfile.Close() + + obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer obj.Close() + + n, err = obj.Seek(int64(offset), 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != int64(offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) + return + } + + n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if n != int64(length-offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, 
startTime).Info() +} + +// Tests bucket re-create errors. +func testMakeBucketErrorV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + region := "eu-west-1" + args["bucketName"] = bucketName + args["region"] = region + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(bucketName, region); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + if err = c.MakeBucket(bucketName, region); err == nil { + logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return + } + // Verify valid error response from server. 
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) + return + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Tests FPutObject hidden contentType setting +func testFPutObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + 
"objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Make a temp file with 11*1024*1024 bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) + n, err := io.CopyN(file, r, 11*1024*1024) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if n != int64(11*1024*1024) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return + } + + // Close the file pro-actively for windows. 
+ err = file.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + + // Set base object name + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + args["fileName"] = file.Name() + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + if n != int64(11*1024*1024) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + + n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + if n != int64(11*1024*1024) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return + } + + // Add extension to temp file name + fileName := file.Name() + err = os.Rename(file.Name(), fileName+".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "Rename failed", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + args["fileName"] = fileName + ".gtar" + + n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + 
if n != int64(11*1024*1024) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return + } + + // Check headers + rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rStandard.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) + return + } + + rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rOctet.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) + return + } + + rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) + return + } + + rGTar, err = c.StatObjectWithContext(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got 
"+rGTar.ContentType, err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + err = os.Remove(fileName + ".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "File remove failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegionsV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + // Make a new bucket with '.' 
// Tests get object ReaderSeeker interface methods.
//
// Uploads a 33 KiB object with a V2-signature client, then verifies the
// returned Object's Seek contract (whence 0/1/2, matching io.Seeker's
// start/current/end semantics) and that reads after each seek match the
// corresponding slice of the original buffer.
func testGetObjectReadSeekFunctionalV2() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "GetObject(bucketName, objectName)"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.NewV2(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Generate 33K of data.
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// Keep a full in-memory copy so later reads can be compared byte-for-byte.
	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAll failed", err)
		return
	}

	// Save the data.
	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	if n != int64(bufSize) {
		// NOTE(review): string(int64) is a rune conversion, not a decimal
		// formatting — the logged value is garbled; kept as-is (file-wide style).
		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
		return
	}

	// Read the data back
	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	defer r.Close()

	st, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}

	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
		return
	}

	// Absolute seek (whence 0) to 2048 must report the new position.
	offset := int64(2048)
	n, err = r.Seek(offset, 0)
	if err != nil {
		logError(testName, function, args, startTime, "", "Seek failed", err)
		return
	}
	if n != offset {
		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
		return
	}
	// Relative seek (whence 1) by 0 must leave the position unchanged.
	n, err = r.Seek(0, 1)
	if err != nil {
		logError(testName, function, args, startTime, "", "Seek failed", err)
		return
	}
	if n != offset {
		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
		return
	}
	// Seeking past the end relative to EOF (whence 2) must be rejected.
	_, err = r.Seek(offset, 2)
	if err == nil {
		logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
		return
	}
	// Negative offset from EOF lands at size-offset.
	n, err = r.Seek(-offset, 2)
	if err != nil {
		logError(testName, function, args, startTime, "", "Seek failed", err)
		return
	}
	if n != st.Size-offset {
		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err)
		return
	}

	// Copy to EOF (io.EOF is expected since fewer than st.Size bytes remain)
	// and compare against the tail of the original buffer.
	var buffer1 bytes.Buffer
	if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "Copy failed", err)
			return
		}
	}
	if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
		return
	}

	// Seek again and read again.
	n, err = r.Seek(offset-1, 0)
	if err != nil {
		logError(testName, function, args, startTime, "", "Seek failed", err)
		return
	}
	if n != (offset - 1) {
		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err)
		return
	}

	var buffer2 bytes.Buffer
	if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "Copy failed", err)
			return
		}
	}
	// Verify now lesser bytes.
	if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
// Tests get object ReaderAt interface methods.
//
// Uploads a 33 KiB object with a V2-signature client and verifies ReadAt at
// three consecutive 512-byte windows, a whole-object read, and a read past
// EOF (which may legitimately return io.EOF).
func testGetObjectReadAtFunctionalV2() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "GetObject(bucketName, objectName)"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.NewV2(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Generate 33K of data.
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	// Keep the full payload in memory for byte-for-byte comparison below.
	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAll failed", err)
		return
	}

	// Save the data
	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	if n != int64(bufSize) {
		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err)
		return
	}

	// Read the data back
	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	defer r.Close()

	st, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}

	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
		return
	}

	offset := int64(2048)

	// Read directly: three consecutive 512-byte windows starting at 2048.
	buf2 := make([]byte, 512)
	buf3 := make([]byte, 512)
	buf4 := make([]byte, 512)

	m, err := r.ReadAt(buf2, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf2) {
		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err)
		return
	}
	if !bytes.Equal(buf2, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}
	offset += 512
	m, err = r.ReadAt(buf3, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf3) {
		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err)
		return
	}
	if !bytes.Equal(buf3, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}
	offset += 512
	m, err = r.ReadAt(buf4, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf4) {
		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err)
		return
	}
	if !bytes.Equal(buf4, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}

	buf5 := make([]byte, n)
	// Read the whole object; io.EOF at the exact boundary is acceptable.
	m, err = r.ReadAt(buf5, 0)
	if err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "ReadAt failed", err)
			return
		}
	}
	if m != len(buf5) {
		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err)
		return
	}
	if !bytes.Equal(buf, buf5) {
		logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
		return
	}

	buf6 := make([]byte, n+1)
	// Read the whole object and beyond; only io.EOF is tolerated.
	_, err = r.ReadAt(buf6, 0)
	if err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "ReadAt failed", err)
			return
		}
	}
	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
// Tests copy object
//
// Exercises server-side CopyObject with a V2-signature client: rejection of
// invalid copy conditions, a successful conditional copy, size equality of
// source and destination, and a copy that must fail on non-matching
// conditions.
func testCopyObjectV2() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "CopyObject(destination, source)"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object
	c, err := minio.NewV2(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")

	// Make a new bucket in 'us-east-1' (source bucket).
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Make a new bucket in 'us-east-1' (destination bucket).
	err = c.MakeBucket(bucketName+"-copy", "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Generate 33K of data.
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()

	// Save the data
	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	if n != int64(bufSize) {
		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
		return
	}

	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	// Check the various fields of source object against destination object.
	// objInfo.ETag is also needed below for the ETag copy condition.
	objInfo, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}
	r.Close()

	// Copy Source
	src := minio.NewSourceInfo(bucketName, objectName, nil)
	args["source"] = src

	// Set copy conditions.

	// All invalid conditions first (year-1 dates and empty ETags must be rejected).
	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
	if err == nil {
		logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
		return
	}
	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
	if err == nil {
		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
		return
	}
	err = src.SetMatchETagCond("")
	if err == nil {
		logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
		return
	}
	err = src.SetMatchETagExceptCond("")
	if err == nil {
		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
		return
	}

	// Now valid conditions that the object satisfies.
	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
	if err != nil {
		logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
		return
	}
	err = src.SetMatchETagCond(objInfo.ETag)
	if err != nil {
		logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
		return
	}

	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
	args["destination"] = dst
	if err != nil {
		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
		return
	}

	// Perform the Copy
	err = c.CopyObject(dst, src)
	if err != nil {
		logError(testName, function, args, startTime, "", "CopyObject failed", err)
		return
	}

	// Source object
	r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	// Destination object
	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	// Check the various fields of source object against destination object.
	objInfo, err = r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}
	objInfoCopy, err := readerCopy.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}
	if objInfo.Size != objInfoCopy.Size {
		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err)
		return
	}

	// Close all the readers.
	r.Close()
	readerCopy.Close()

	// CopyObject again but with wrong conditions: unmodified-since in the past
	// plus an except-ETag equal to the real ETag, so the copy must be refused.
	src = minio.NewSourceInfo(bucketName, objectName, nil)
	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
	if err != nil {
		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
		return
	}
	err = src.SetMatchETagExceptCond(objInfo.ETag)
	if err != nil {
		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
		return
	}

	// Perform the Copy which should fail
	err = c.CopyObject(dst, src)
	if err == nil {
		logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}
	if err = cleanupBucket(bucketName+"-copy", c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}
	successLogger(testName, function, args, startTime).Info()
}
// testComposeObjectErrorCasesWrapper runs the shared ComposeObject
// error-case scenarios against the provided client: a source list above the
// 10,000-object limit must be rejected with the documented error string, and
// a source with a byte range beyond the object's size must fail with a
// "has invalid segment-to-copy" error.
func testComposeObjectErrorCasesWrapper(c *minio.Client) {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "ComposeObject(destination, sourceList)"
	args := map[string]interface{}{}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")

	// Make a new bucket in 'us-east-1' (source bucket).
	err := c.MakeBucket(bucketName, "us-east-1")

	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Test that more than 10K source objects cannot be
	// concatenated.
	srcArr := [10001]minio.SourceInfo{}
	srcSlice := srcArr[:]
	dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
	if err != nil {
		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
		return
	}

	args["destination"] = dst
	// Just explain about srcArr in args["sourceList"]
	// to stop having 10,001 null headers logged
	args["sourceList"] = "source array of 10,001 elements"
	// The exact error string (including the "as least" typo) is asserted —
	// it must track the library verbatim.
	if err := c.ComposeObject(dst, srcSlice); err == nil {
		logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
		return
	} else if err.Error() != "There must be as least one and up to 10000 source objects." {
		logError(testName, function, args, startTime, "", "Got unexpected error", err)
		return
	}

	// Create a source with invalid offset spec and check that
	// error is returned:
	// 1. Create the source object.
	const badSrcSize = 5 * 1024 * 1024
	buf := bytes.Repeat([]byte("1"), badSrcSize)
	_, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}
	// 2. Set invalid range spec on the object (going beyond
	// object size)
	badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
	err = badSrc.SetRange(1, badSrcSize)
	if err != nil {
		logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err)
		return
	}
	// 3. ComposeObject call should fail.
	if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
		logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
		return
	} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
		logError(testName, function, args, startTime, "", "Got invalid error", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
+ if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil { + logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) + return + } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { + logError(testName, function, args, startTime, "", "Got invalid error", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCasesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +func testComposeMultipleSources(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{ + "destination": "", + "sourceList": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). 
+ err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // We will append 10 copies of the object. + srcs := []minio.SourceInfo{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) + } + // make the last part very small + err = srcs[9].SetRange(0, 0) + if err != nil { + logError(testName, function, args, startTime, "", "SetRange failed", err) + return + } + args["sourceList"] = srcs + + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) + args["destination"] = dst + + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + err = c.ComposeObject(dst, srcs) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objProps.Size != 9*srcSize+1 { + logError(testName, function, args, startTime, "", "Size mismatched! 
Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test concatenating multiple objects objects +func testCompose10KSourcesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeMultipleSources(c) +} + +func testEncryptedEmptyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) + + // 1. 
// testEncryptedEmptyObject verifies SSE-C handling of a zero-byte object:
// upload, server-side copy, key rotation via a second copy, and a final
// download that must decrypt back to the empty payload.
func testEncryptedEmptyObject() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
	args := map[string]interface{}{}

	// Instantiate new minio client object
	c, err := minio.NewV4(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
		return
	}

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName
	// Make a new bucket in 'us-east-1' (source bucket).
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// SSE-C key derived from a passphrase, salted with bucket+object name.
	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))

	// 1. create an sse-c encrypted object to copy by uploading
	const srcSize = 0
	var buf []byte // Empty buffer
	args["objectName"] = "object"
	_, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject call failed", err)
		return
	}

	// 2. Test CopyObject for an empty object
	dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil)
	if err != nil {
		args["objectName"] = "new-object"
		function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)"
		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
		return
	}
	srcInfo := minio.NewSourceInfo(bucketName, "object", sse)
	if err = c.CopyObject(dstInfo, srcInfo); err != nil {
		function = "CopyObject(dstInfo, srcInfo)"
		logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
		return
	}

	// 3. Test Key rotation: copy "new-object" onto itself with a new key.
	newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object"))
	dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil)
	if err != nil {
		args["objectName"] = "new-object"
		function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)"
		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
		return
	}

	srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse)
	if err = c.CopyObject(dstInfo, srcInfo); err != nil {
		function = "CopyObject(dstInfo, srcInfo)"
		logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err)
		return
	}

	// 4. Download the object with the rotated key; it must decrypt to empty.
	reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	defer reader.Close()

	decBytes, err := ioutil.ReadAll(reader)
	if err != nil {
		logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
		return
	}
	if !bytes.Equal(decBytes, buf) {
		logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err)
		return
	}
	// Delete all objects and buckets
	delete(args, "objectName")
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
+ reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + // Delete all objects and buckets + delete(args, "objectName") + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + var srcEncryption, dstEncryption encrypt.ServerSide + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + if sseSrc != nil && sseSrc.Type() != encrypt.S3 { + srcEncryption = sseSrc + } + + // 2. 
copy object and change encryption key + src := minio.NewSourceInfo(bucketName, "srcObject", srcEncryption) + args["source"] = src + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + if sseDst != nil && sseDst.Type() != encrypt.S3 { + dstEncryption = sseDst + } + // 3. get copied object and check if content is equal + coreClient := minio.Core{c} + reader, _, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test key rotation for source object in-place. 
+ var newSSE encrypt.ServerSide + if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { + newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key + } + if sseSrc != nil && sseSrc.Type() == encrypt.S3 { + newSSE = encrypt.NewSSE() + } + if newSSE != nil { + dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied object and check if content is equal + reader, _, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + // Test in-place decryption. 
+ dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + src = minio.NewSourceInfo(bucketName, "srcObject", newSSE) + args["source"] = src + err = c.CopyObject(dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) + return + } + } + + // Get copied decrypted object and check if content is equal + reader, _, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err = ioutil.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test encrypted copy object +func testUnencryptedToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc encrypt.ServerSide + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc encrypt.ServerSide + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc, sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +func testDecryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" + if err = c.MakeBucket(bucketName, "us-east-1"); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket 
failed", err) + return + } + + encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ + ServerSideEncryption: encryption, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption)) + args["source"] = src + dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + args["destination"] = dst + + if err = c.CopyObject(dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +func testSSECMultipartEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(bucketName, objectName, uploadID, completeParts) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(bucketName, 
objectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: srcencryption}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, 
function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (6*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 6*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 6*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) + } + + getOpts.SetRange(6*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 6*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:6*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) + } + if getBuf[6*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation +func testSSECEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := 
objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, 
minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy +func testSSECEncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + 
destBucketName := bucketName + destObjectName := objectName + "-dest" + var dstencryption encrypt.ServerSide + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + if err != nil { + logError(testName, function, 
args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy +func testSSECEncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call 
failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part +func testUnencryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, 
"", "GetObject call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testUnencryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, 
getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testUnencryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 
5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part +func testSSES3EncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcEncryption := encrypt.NewSSE() + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcEncryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, 
"", "GetObject call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testSSES3EncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + srcEncryption := encrypt.NewSSE() + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcEncryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, 
getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testSSES3EncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + srcEncryption := encrypt.NewSSE() + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcEncryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf := make([]byte, 
5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} +func testUserMetadataCopying() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testUserMetadataCopyingWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + // 1. create a client encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + metadata := make(http.Header) + metadata.Set("x-amz-meta-myheader", "myvalue") + m := make(map[string]string) + m["x-amz-meta-myheader"] = "myvalue" + _, err = c.PutObject(bucketName, "srcObject", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) + return + } + if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 2. create source + src := minio.NewSourceInfo(bucketName, "srcObject", nil) + // 2.1 create destination with metadata set + dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"}) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + + // 3. Check that copying to an object with metadata set resets + // the headers on the copy. 
+ args["source"] = src + args["destination"] = dst1 + err = c.CopyObject(dst1, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders := make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 4. create destination with no metadata set and same source + dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + src = minio.NewSourceInfo(bucketName, "srcObject", nil) + + // 5. Check that copying to an object with no metadata set, + // copies metadata. + args["source"] = src + args["destination"] = dst2 + err = c.CopyObject(dst2, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders = metadata + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 6. Compose a pair of sources. 
+ srcs := []minio.SourceInfo{ + minio.NewSourceInfo(bucketName, "srcObject", nil), + minio.NewSourceInfo(bucketName, "srcObject", nil), + } + dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + + function = "ComposeObject(destination, sources)" + args["source"] = srcs + args["destination"] = dst3 + err = c.ComposeObject(dst3, srcs) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 7. Compose a pair of sources with dest user metadata set. + srcs = []minio.SourceInfo{ + minio.NewSourceInfo(bucketName, "srcObject", nil), + minio.NewSourceInfo(bucketName, "srcObject", nil), + } + dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"}) + if err != nil { + logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) + return + } + + function = "ComposeObject(destination, sources)" + args["source"] = srcs + args["destination"] = dst4 + err = c.ComposeObject(dst4, srcs) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + expectedHeaders = make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } 
+ + successLogger(testName, function, args, startTime).Info() +} + +func testUserMetadataCopyingV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testStorageClassMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + metadata := make(http.Header) + metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + + emptyMetadata := make(http.Header) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB + + _, err = c.PutObject(bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Get the returned metadata + returnedMeta := fetchMeta("srcObjectRRSClass") + + // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) + if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + _, err = c.PutObject(bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD 
storage class should not be a part of response metadata", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +func testStorageClassInvalidMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassInvalidMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB + + _, err = c.PutObject(bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +func testStorageClassMetadataCopyObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataCopyObject()" + args := map[string]interface{}{} + testName := getFuncName() + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) + args["bucket"] = bucketName + args["object"] = object + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + metadata := make(http.Header) + metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + + emptyMetadata := make(http.Header) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) + + // Put an object with RRS Storage class + _, err = c.PutObject(bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil) + dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil) + if err = c.CopyObject(dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) + } + + // Get the returned metadata + returnedMeta := fetchMeta("srcObjectRRSClassCopy") + + // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) + if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + 
metadata.Set("x-amz-storage-class", "STANDARD") + + // Put an object with Standard Storage class + _, err = c.PutObject(bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil) + dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil) + if err = c.CopyObject(dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + } + // Fetch the meta data of copied object + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
+ // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + objectName := bucketName + "unique" + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-129-MB"] + var reader = getDataReader("datafile-129-MB") + defer reader.Close() + args["size"] = bufSize + + // Upload an object. + n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + if n != int64(bufSize) { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put objects of unknown size. +func testPutObjectsUnknownV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Issues are revealed by trying to upload multiple files of unknown size + // sequentially (on 4GB machines) + for i := 1; i <= 4; i++ { + // Simulate that we could be receiving byte slices of data that we want + // to upload as a file + rpipe, wpipe := io.Pipe() + defer rpipe.Close() + go func() { + b := []byte("test") + wpipe.Write(b) + wpipe.Close() + }() + + // Upload the object. + objectName := fmt.Sprintf("%sunique%d", bucketName, i) + args["objectName"] = objectName + + n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + args["size"] = n + if n != int64(4) { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err) + return + } + + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test put object with 0 byte object. 
+func testPutObject0ByteV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": 0, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + objectName := bucketName + "unique" + args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{} + + // Upload an object. 
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) + + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + if n != 0 { + logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCases() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +// Test concatenating 10K objects +func testCompose10KSources() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testComposeMultipleSources(c) +} + +// Tests comprehensive list of all methods. 
+func testFunctionalV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctionalV2()" + functionAll := "" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + // Make a new bucket. + function = "MakeBucket(bucketName, location)" + functionAll = "MakeBucket(bucketName, location)" + args = map[string]interface{}{ + "bucketName": bucketName, + "location": location, + } + err = c.MakeBucket(bucketName, location) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. 
+ var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, bucketPolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(bucketName, readWritePolicy) + + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets() + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. 
+ if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if n != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) + return + } + + objectNameNoLength := objectName + "-nolength" + args["objectName"] = objectNameNoLength + n, err = c.PutObject(bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + if n != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) + return + } + + incompObjNotFound := true + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = 
c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FgetObject failed", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + // Verify if presigned url works. + resp, err := http.Head(presignedHeadURL.String()) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "Got empty ETag", err) + return + } + resp.Body.Close() + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + // Verify if presigned url works. 
+ resp, err = http.Get(presignedGetURL.String()) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + // Generate presigned GET object url. + args["reqParams"] = reqParams + presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + // Verify if presigned url works. + resp, err = http.Get(presignedGetURL.String()) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + // Verify content disposition. 
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively cancelled after 30secs + // with no response. 
+ Timeout: 30 * time.Second, + Transport: http.DefaultTransport, + } + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + } + newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + if err = os.Remove(fileName); err != nil { + logError(testName, function, args, startTime, "", "File remove failed", err) + return + } + if err = os.Remove(fileName + "-f"); err != nil { + logError(testName, function, args, startTime, "", "File removes failed", err) + return + } + successLogger(testName, functionAll, args, startTime).Info() +} + +// Test get object with GetObjectWithContext +func testGetObjectWithContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObjectWithContext(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) + return + } + + if _, err = r.Stat(); err == nil { + logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) + return + } + r.Close() + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + + // Read the data back + r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != 
nil { + logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "object Stat call failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "object Close() call failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Test get object with FGetObjectWithContext +func testFGetObjectWithContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-1-MB"] + var reader = getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err) + return + } + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(testName, function, args, startTime, "", "Remove file failed", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Test get object ACLs with GetObjectACLWithContext +func testGetObjectACLWithContext() { + // initialize logging params + startTime := time.Now() + 
testName := getFuncName() + function := "GetObjectACLWithContext(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // skipping region functional tests for non s3 runs + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	var reader = getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add meta data to add a canned acl
+	metaData := map[string]string{
+		"X-Amz-Acl": "public-read-write",
+	}
+
+	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	objectInfo, getObjectACLErr := c.GetObjectACLWithContext(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail", getObjectACLErr)
+		return
+	}
+
+	s, ok := objectInfo.Metadata["X-Amz-Acl"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail unable to find \"X-Amz-Acl\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	if s[0] != "public-read-write" {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil)
+		return
+	}
+
+	bufSize = dataFileMap["datafile-1-MB"]
+	var reader2 = getDataReader("datafile-1-MB")
+	defer reader2.Close()
+	// Save the data
+	objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add
meta data to add a canned acl
+	metaData = map[string]string{
+		"X-Amz-Grant-Read":  "id=fooread@minio.go",
+		"X-Amz-Grant-Write": "id=foowrite@minio.go",
+	}
+
+	_, err = c.PutObject(bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	objectInfo, getObjectACLErr = c.GetObjectACLWithContext(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail", getObjectACLErr)
+		return
+	}
+
+	if len(objectInfo.Metadata) != 3 {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
+		return
+	}
+
+	s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail unable to find \"X-Amz-Grant-Read\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	if s[0] != "fooread@minio.go" {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil)
+		return
+	}
+
+	s, ok = objectInfo.Metadata["X-Amz-Grant-Write"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail unable to find \"X-Amz-Grant-Write\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`,
len(s)), nil) + return + } + + if s[0] != "foowrite@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer c.RemoveBucket(bucketName)
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	args["ctx"] = ctx
+	args["size"] = bufSize
+	defer cancel()
+
+	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+
+	defer cancel()
+	reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test get object with GetObjectWithContext
+func testGetObjectWithContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObjectWithContext(ctx, bucketName, objectName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	// Seed
random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) + return + } + if _, err = r.Stat(); err == nil { + logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) + return + } + r.Close() + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + r, err = 
c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "object Stat call failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", " object Close() call failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() + +} + +// Test get object with FGetObjectWithContext +func testFGetObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+		return
+	}
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	var reader = getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+
+	// Read the data back
+	err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err)
+		return
+	}
+
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args,
startTime).Info() + +} + +// Test list object v1 and V2 +func testListObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + testObjects := []struct { + name string + storageClass string + }{ + // \x17 is a forbidden character in a xml document + {"foo\x17bar", "STANDARD"}, + // Special characters + {"foo bar", "STANDARD"}, + {"foo-%", "STANDARD"}, + {"random-object-1", "STANDARD"}, + {"random-object-2", "REDUCED_REDUNDANCY"}, + } + + for i, object := range testObjects { + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(bucketName, object.name, reader, int64(bufSize), + minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass}) + if err != nil { + logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err) + return + } + } + + testList := func(listFn func(string, string, bool, <-chan struct{}) <-chan minio.ObjectInfo, bucket string) { + // Create a done channel to control 'ListObjects' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. 
+ defer close(doneCh) + + var objCursor int + + // check for object name and storage-class from listing object result + for objInfo := range listFn(bucket, "", true, doneCh) { + if objInfo.Err != nil { + logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) + return + } + if objInfo.Key != testObjects[objCursor].name { + logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err) + } + if objInfo.StorageClass != testObjects[objCursor].storageClass { + // Ignored as Gateways (Azure/GCS etc) wont return storage class + ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() + } + objCursor++ + } + + if objCursor != len(testObjects) { + logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("")) + } + } + + testList(c.ListObjects, bucketName) + testList(c.ListObjectsV2, bucketName) + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +func main() { + // Output to stdout instead of the default stderr + log.SetOutput(os.Stdout) + // create custom formatter + mintFormatter := mintJSONFormatter{} + // set custom formatter + log.SetFormatter(&mintFormatter) + // log Info or above -- success cases are Info level, failures are Fatal level + log.SetLevel(log.InfoLevel) + + tls := mustParseBool(os.Getenv(enableHTTPS)) + kmsEnabled := mustParseBool(os.Getenv(enableKMS)) + // execute tests + if isFullMode() { + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testFPutObjectV2() + testMakeBucketRegionsV2() 
+ testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testUserMetadataCopyingV2() + testPutObject0ByteV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectWithContextV2() + testFPutObjectWithContextV2() + testFGetObjectWithContextV2() + testPutObjectWithContextV2() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testRemoveMultipleObjects() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testGetObjectReadAtWhenEOFWasReached() + testPresignedPostPolicy() + testCopyObject() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testBucketNotification() + testFunctional() + testGetObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectWithContext() + testFPutObjectWithContext() + testFGetObjectWithContext() + + testGetObjectACLWithContext() + + testPutObjectWithContext() + testStorageClassMetadataPutObject() + testStorageClassInvalidMetadataPutObject() + testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() + + // SSE-C tests will only work over TLS connection. 
+ if tls { + testSSECEncryptionPutGet() + testSSECEncryptionFPut() + testSSECEncryptedGetObjectReadAtFunctional() + testSSECEncryptedGetObjectReadSeekFunctional() + testEncryptedCopyObjectV2() + testEncryptedSSECToSSECCopyObject() + testEncryptedSSECToUnencryptedCopyObject() + testUnencryptedToSSECCopyObject() + testUnencryptedToUnencryptedCopyObject() + testEncryptedEmptyObject() + testDecryptedCopyObject() + testSSECEncryptedToSSECCopyObjectPart() + testSSECMultipartEncryptedToSSECCopyObjectPart() + testSSECEncryptedToUnencryptedCopyPart() + testUnencryptedToSSECCopyObjectPart() + testUnencryptedToUnencryptedCopyPart() + if kmsEnabled { + testSSES3EncryptionPutGet() + testSSES3EncryptionFPut() + testSSES3EncryptedGetObjectReadAtFunctional() + testSSES3EncryptedGetObjectReadSeekFunctional() + testEncryptedSSECToSSES3CopyObject() + testEncryptedSSES3ToSSECCopyObject() + testEncryptedSSES3ToSSES3CopyObject() + testEncryptedSSES3ToUnencryptedCopyObject() + testUnencryptedToSSES3CopyObject() + testSSECEncryptedToSSES3CopyObjectPart() + testUnencryptedToSSES3CopyObjectPart() + testSSES3EncryptedToSSECCopyObjectPart() + testSSES3EncryptedToUnencryptedCopyPart() + testSSES3EncryptedToSSES3CopyObjectPart() + } + } + } else { + testFunctional() + testFunctionalV2() + } +} diff --git a/vendor/github.com/prometheus/alertmanager/asset/asset_generate.go b/vendor/github.com/prometheus/alertmanager/asset/asset_generate.go new file mode 100644 index 0000000000..2b8a4d3b05 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/asset/asset_generate.go @@ -0,0 +1,38 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +package main + +import ( + "log" + "time" + + "github.com/shurcooL/vfsgen" + + "github.com/prometheus/alertmanager/asset" + "github.com/prometheus/alertmanager/pkg/modtimevfs" +) + +func main() { + fs := modtimevfs.New(asset.Assets, time.Unix(1, 0)) + err := vfsgen.Generate(fs, vfsgen.Options{ + PackageName: "asset", + BuildTags: "!dev", + VariableName: "Assets", + }) + if err != nil { + log.Fatalln(err) + } +} diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go index 52c2cea0d3..6c498f3d48 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go +++ b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.pb.go @@ -1,32 +1,22 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/weaveworks/common/httpgrpc/httpgrpc.proto +// source: httpgrpc/httpgrpc.proto -/* - Package httpgrpc is a generated protocol buffer package. 
- - It is generated from these files: - github.com/weaveworks/common/httpgrpc/httpgrpc.proto - - It has these top-level messages: - HTTPRequest - HTTPResponse - Header -*/ package httpgrpc -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import bytes "bytes" - -import strings "strings" -import reflect "reflect" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" +import ( + bytes "bytes" + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -37,18 +27,46 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type HTTPRequest struct { Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"` Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - Headers []*Header `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty"` + Headers []*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty"` Body []byte `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"` } -func (m *HTTPRequest) Reset() { *m = HTTPRequest{} } -func (*HTTPRequest) ProtoMessage() {} -func (*HTTPRequest) Descriptor() ([]byte, []int) { return fileDescriptorHttpgrpc, []int{0} } +func (m *HTTPRequest) Reset() { *m = HTTPRequest{} } +func (*HTTPRequest) ProtoMessage() {} +func (*HTTPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6670c8e151665986, []int{0} +} +func (m *HTTPRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRequest.Merge(m, src) +} +func (m *HTTPRequest) XXX_Size() int { + return m.Size() +} +func (m *HTTPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRequest proto.InternalMessageInfo func (m *HTTPRequest) GetMethod() string { if m != nil { @@ -79,14 +97,42 @@ func (m *HTTPRequest) GetBody() []byte { } type HTTPResponse struct { - Code int32 `protobuf:"varint,1,opt,name=Code,json=code,proto3" json:"Code,omitempty"` - Headers []*Header `protobuf:"bytes,2,rep,name=headers" 
json:"headers,omitempty"` + Code int32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` + Headers []*Header `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` } -func (m *HTTPResponse) Reset() { *m = HTTPResponse{} } -func (*HTTPResponse) ProtoMessage() {} -func (*HTTPResponse) Descriptor() ([]byte, []int) { return fileDescriptorHttpgrpc, []int{1} } +func (m *HTTPResponse) Reset() { *m = HTTPResponse{} } +func (*HTTPResponse) ProtoMessage() {} +func (*HTTPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6670c8e151665986, []int{1} +} +func (m *HTTPResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPResponse.Merge(m, src) +} +func (m *HTTPResponse) XXX_Size() int { + return m.Size() +} +func (m *HTTPResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPResponse proto.InternalMessageInfo func (m *HTTPResponse) GetCode() int32 { if m != nil { @@ -111,12 +157,40 @@ func (m *HTTPResponse) GetBody() []byte { type Header struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Values []string `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"` + Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_6670c8e151665986, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) } -func (m *Header) Reset() { *m = Header{} } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { return fileDescriptorHttpgrpc, []int{2} } +var xxx_messageInfo_Header proto.InternalMessageInfo func (m *Header) GetKey() string { if m != nil { @@ -137,6 +211,32 @@ func init() { proto.RegisterType((*HTTPResponse)(nil), "httpgrpc.HTTPResponse") proto.RegisterType((*Header)(nil), "httpgrpc.Header") } + +func init() { proto.RegisterFile("httpgrpc/httpgrpc.proto", fileDescriptor_6670c8e151665986) } + +var fileDescriptor_6670c8e151665986 = []byte{ + // 295 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0x28, 0x29, 0x29, + 0x48, 0x2f, 0x2a, 0x48, 0xd6, 0x87, 0x31, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x38, 0x60, + 0x7c, 0xa5, 0x72, 0x2e, 0x6e, 0x8f, 0x90, 0x90, 0x80, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, + 0x21, 0x31, 0x2e, 0xb6, 0xdc, 0xd4, 0x92, 0x8c, 0xfc, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, + 0x20, 0x28, 0x4f, 0x48, 0x80, 0x8b, 0xb9, 0xb4, 0x28, 0x47, 0x82, 0x09, 0x2c, 0x08, 0x62, 0x0a, + 0x69, 0x71, 0xb1, 0x67, 0xa4, 0x26, 0xa6, 0xa4, 0x16, 0x15, 0x4b, 0x30, 0x2b, 0x30, 0x6b, 0x70, + 0x1b, 0x09, 0xe8, 0xc1, 0x2d, 0xf1, 0x00, 0x4b, 0x04, 0xc1, 0x14, 0x08, 0x09, 0x71, 0xb1, 0x24, + 0xe5, 0xa7, 0x54, 0x4a, 0xb0, 0x28, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x4a, 0x49, 0x5c, 0x3c, + 0x10, 0x8b, 0x8b, 0x0b, 0xf2, 0xf3, 
0x8a, 0x53, 0x41, 0x6a, 0x9c, 0xf3, 0x53, 0x52, 0xc1, 0xf6, + 0xb2, 0x06, 0x81, 0xd9, 0xc8, 0x76, 0x30, 0x11, 0x6b, 0x07, 0x33, 0x92, 0x1d, 0x46, 0x5c, 0x6c, + 0x10, 0x65, 0x20, 0xf7, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x05, 0x62, 0x82, 0x7c, 0x5a, 0x96, 0x98, + 0x53, 0x9a, 0x0a, 0x31, 0x9a, 0x33, 0x08, 0xca, 0x33, 0x72, 0xe4, 0x62, 0x01, 0xb9, 0x4b, 0xc8, + 0x92, 0x8b, 0xcd, 0x23, 0x31, 0x2f, 0x25, 0x27, 0x55, 0x48, 0x14, 0xc9, 0x52, 0x44, 0x50, 0x49, + 0x89, 0xa1, 0x0b, 0x43, 0x3c, 0xa2, 0xc4, 0xe0, 0x64, 0x77, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, + 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0x36, 0x3c, 0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, + 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, + 0xc3, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, + 0x21, 0x0a, 0x1e, 0x27, 0x49, 0x6c, 0xe0, 0x48, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xd9, + 0x2e, 0xd7, 0x22, 0xbf, 0x01, 0x00, 0x00, +} + func (this *HTTPRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -299,8 +399,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for HTTP service - +// HTTPClient is the client API for HTTP service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type HTTPClient interface { Handle(ctx context.Context, in *HTTPRequest, opts ...grpc.CallOption) (*HTTPResponse, error) } @@ -315,19 +416,26 @@ func NewHTTPClient(cc *grpc.ClientConn) HTTPClient { func (c *hTTPClient) Handle(ctx context.Context, in *HTTPRequest, opts ...grpc.CallOption) (*HTTPResponse, error) { out := new(HTTPResponse) - err := grpc.Invoke(ctx, "/httpgrpc.HTTP/Handle", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/httpgrpc.HTTP/Handle", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -// Server API for HTTP service - +// HTTPServer is the server API for HTTP service. type HTTPServer interface { Handle(context.Context, *HTTPRequest) (*HTTPResponse, error) } +// UnimplementedHTTPServer can be embedded to have forward compatible implementations. +type UnimplementedHTTPServer struct { +} + +func (*UnimplementedHTTPServer) Handle(ctx context.Context, req *HTTPRequest) (*HTTPResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Handle not implemented") +} + func RegisterHTTPServer(s *grpc.Server, srv HTTPServer) { s.RegisterService(&_HTTP_serviceDesc, srv) } @@ -360,13 +468,13 @@ var _HTTP_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "github.com/weaveworks/common/httpgrpc/httpgrpc.proto", + Metadata: "httpgrpc/httpgrpc.proto", } func (m *HTTPRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -374,47 +482,57 @@ func (m *HTTPRequest) Marshal() (dAtA []byte, err error) { } func (m *HTTPRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Method) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Method))) - i += copy(dAtA[i:], m.Method) - } - if len(m.Url) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Url))) - i += copy(dAtA[i:], m.Url) + if len(m.Body) > 0 { + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x22 } if len(m.Headers) > 0 { - for _, msg := range m.Headers { - dAtA[i] = 0x1a - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHttpgrpc(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.Body) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Body))) - i += copy(dAtA[i:], m.Body) + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x12 + } + if len(m.Method) > 0 { + i -= len(m.Method) + copy(dAtA[i:], m.Method) + i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Method))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *HTTPResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -422,40 +540,48 @@ func (m *HTTPResponse) Marshal() (dAtA []byte, err error) { } func (m *HTTPResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Code != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(m.Code)) + if len(m.Body) > 0 { + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x1a } if len(m.Headers) > 0 { - for _, msg := range m.Headers { - dAtA[i] = 0x12 - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintHttpgrpc(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Body) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Body))) - i += copy(dAtA[i:], m.Body) + if m.Code != 0 { + i = encodeVarintHttpgrpc(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Header) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -463,44 +589,49 @@ func (m *Header) Marshal() (dAtA []byte, err error) { } func (m *Header) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } if len(m.Values) > 0 { - for _, s := range m.Values { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintHttpgrpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func encodeVarintHttpgrpc(dAtA []byte, offset int, v uint64) int { + offset -= sovHttpgrpc(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *HTTPRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Method) @@ -525,6 +656,9 @@ func (m 
*HTTPRequest) Size() (n int) { } func (m *HTTPResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Code != 0 { @@ -544,6 +678,9 @@ func (m *HTTPResponse) Size() (n int) { } func (m *Header) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -560,14 +697,7 @@ func (m *Header) Size() (n int) { } func sovHttpgrpc(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozHttpgrpc(x uint64) (n int) { return sovHttpgrpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -576,10 +706,15 @@ func (this *HTTPRequest) String() string { if this == nil { return "nil" } + repeatedStringForHeaders := "[]*Header{" + for _, f := range this.Headers { + repeatedStringForHeaders += strings.Replace(f.String(), "Header", "Header", 1) + "," + } + repeatedStringForHeaders += "}" s := strings.Join([]string{`&HTTPRequest{`, `Method:` + fmt.Sprintf("%v", this.Method) + `,`, `Url:` + fmt.Sprintf("%v", this.Url) + `,`, - `Headers:` + strings.Replace(fmt.Sprintf("%v", this.Headers), "Header", "Header", 1) + `,`, + `Headers:` + repeatedStringForHeaders + `,`, `Body:` + fmt.Sprintf("%v", this.Body) + `,`, `}`, }, "") @@ -589,9 +724,14 @@ func (this *HTTPResponse) String() string { if this == nil { return "nil" } + repeatedStringForHeaders := "[]*Header{" + for _, f := range this.Headers { + repeatedStringForHeaders += strings.Replace(f.String(), "Header", "Header", 1) + "," + } + repeatedStringForHeaders += "}" s := strings.Join([]string{`&HTTPResponse{`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, - `Headers:` + strings.Replace(fmt.Sprintf("%v", this.Headers), "Header", "Header", 1) + `,`, + `Headers:` + repeatedStringForHeaders + `,`, `Body:` + fmt.Sprintf("%v", this.Body) + `,`, `}`, }, "") @@ -631,7 +771,7 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if 
b < 0x80 { break } @@ -659,7 +799,7 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -669,6 +809,9 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -688,7 +831,7 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -698,6 +841,9 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -717,7 +863,7 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -726,6 +872,9 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -748,7 +897,7 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -757,6 +906,9 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -774,6 +926,9 @@ func (m *HTTPRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthHttpgrpc } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHttpgrpc + } if (iNdEx + 
skippy) > l { return io.ErrUnexpectedEOF } @@ -801,7 +956,7 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -829,7 +984,7 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift + m.Code |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -848,7 +1003,7 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -857,6 +1012,9 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -879,7 +1037,7 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -888,6 +1046,9 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -905,6 +1066,9 @@ func (m *HTTPResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthHttpgrpc } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHttpgrpc + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -932,7 +1096,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -960,7 +1124,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -970,6 +1134,9 @@ func (m *Header) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -989,7 +1156,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -999,6 +1166,9 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthHttpgrpc } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttpgrpc + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1013,6 +1183,9 @@ func (m *Header) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthHttpgrpc } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHttpgrpc + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1028,6 +1201,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { func skipHttpgrpc(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1059,10 +1233,8 @@ func skipHttpgrpc(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1079,81 +1251,34 @@ func skipHttpgrpc(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthHttpgrpc } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHttpgrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipHttpgrpc(dAtA[start:]) - if err != nil { - return 0, err - } - 
iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupHttpgrpc + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthHttpgrpc + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthHttpgrpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHttpgrpc = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthHttpgrpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHttpgrpc = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupHttpgrpc = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("github.com/weaveworks/common/httpgrpc/httpgrpc.proto", fileDescriptorHttpgrpc) -} - -var fileDescriptorHttpgrpc = []byte{ - // 319 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x49, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, 0x4f, 0x4d, 0x2c, 0x4b, 0x2d, 0xcf, 0x2f, 0xca, - 0x2e, 0xd6, 0x4f, 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0xd3, 0xcf, 0x28, 0x29, 0x29, 0x48, 0x2f, 0x2a, - 0x48, 0x86, 0x33, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x38, 0x60, 0x7c, 0xa5, 0x72, 0x2e, - 0x6e, 0x8f, 0x90, 0x90, 0x80, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x21, 0x31, 0x2e, 0xb6, - 0xdc, 0xd4, 0x92, 0x8c, 0xfc, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x28, 0x4f, 0x48, - 0x80, 0x8b, 0xb9, 0xb4, 0x28, 0x47, 0x82, 0x09, 0x2c, 0x08, 0x62, 0x0a, 0x69, 0x71, 0xb1, 0x67, - 0xa4, 0x26, 0xa6, 0xa4, 0x16, 0x15, 0x4b, 0x30, 0x2b, 0x30, 0x6b, 0x70, 0x1b, 0x09, 0xe8, 0xc1, - 0x2d, 0xf1, 0x00, 0x4b, 0x04, 0xc1, 0x14, 0x08, 0x09, 0x71, 0xb1, 0x24, 0xe5, 0xa7, 0x54, 0x4a, - 0xb0, 0x28, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x4a, 0x49, 0x5c, 0x3c, 
0x10, 0x8b, 0x8b, 0x0b, - 0xf2, 0xf3, 0x8a, 0x53, 0x41, 0x6a, 0x9c, 0xf3, 0x53, 0x52, 0xc1, 0xf6, 0xb2, 0x06, 0xb1, 0x24, - 0xe7, 0xa7, 0xa4, 0x22, 0xdb, 0xc1, 0x44, 0xac, 0x1d, 0xcc, 0x48, 0x76, 0x18, 0x71, 0xb1, 0x41, - 0x94, 0x81, 0xdc, 0x9f, 0x9d, 0x5a, 0x09, 0xf5, 0x14, 0x88, 0x09, 0xf2, 0x69, 0x59, 0x62, 0x4e, - 0x69, 0x2a, 0xc4, 0x68, 0xce, 0x20, 0x28, 0xcf, 0xc8, 0x91, 0x8b, 0x05, 0xe4, 0x2e, 0x21, 0x4b, - 0x2e, 0x36, 0x8f, 0xc4, 0xbc, 0x94, 0x9c, 0x54, 0x21, 0x51, 0x24, 0x4b, 0x11, 0x41, 0x25, 0x25, - 0x86, 0x2e, 0x0c, 0xf1, 0x88, 0x12, 0x83, 0x53, 0xf0, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, - 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, - 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, - 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0x88, 0x52, 0x25, 0x2a, 0x06, 0x93, 0xd8, 0xc0, 0x31, - 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x49, 0xec, 0x86, 0xd6, 0xf1, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto index 5802486e80..c9c267486d 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto +++ b/vendor/github.com/weaveworks/common/httpgrpc/httpgrpc.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package httpgrpc; -option go_package = "github.com/weaveworks/common/httpgrpc"; +option go_package = "httpgrpc"; service HTTP { rpc Handle(HTTPRequest) returns (HTTPResponse) {}; diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 0000000000..2a7661c27b --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,383 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, + { + "https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml", + parseAddrFamilyNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + 
fmt.Fprintf(w, "const (\n") + for _, dr := range dr.escapeDSCP() { + fmt.Fprintf(w, "DiffServ%s = %#02x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + for _, er := range dr.escapeECN() { + fmt.Fprintf(w, "%s = %#02x", er.Descr, er.Value) + fmt.Fprintf(w, "// %s\n", er.OrigDescr) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + Registries []struct { + Title string `xml:"title"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"record"` + } `xml:"registry"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escapeDSCP() []canonDSCPRecord { + var drs []canonDSCPRecord + for _, preg := range drr.Registries { + if !strings.Contains(preg.Title, "Differentiated Services Field Codepoints") { + continue + } + for _, reg := range preg.Registries { + if !strings.Contains(reg.Title, "Pool 1 Codepoints") { + continue + } + drs = make([]canonDSCPRecord, len(reg.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range reg.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + } + } + return drs +} + +type canonECNRecord struct { + OrigDescr string + Descr string + Value int +} + +func (drr *dscpRegistry) escapeECN() []canonECNRecord { + var ers []canonECNRecord + for _, reg := range drr.Registries { + if !strings.Contains(reg.Title, "ECN Field") { + continue + } + ers = make([]canonECNRecord, len(reg.Records)) + sr := 
strings.NewReplacer( + "Capable", "", + "Not-ECT", "", + "ECT(1)", "", + "ECT(0)", "", + "CE", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, er := range reg.Records { + s := strings.TrimSpace(er.Descr) + ers[i].OrigDescr = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + ers[i].Descr = strings.Join(ss[1:], " ") + } else { + ers[i].Descr = ss[0] + } + ers[i].Descr = sr.Replace(er.Descr) + n, err := strconv.ParseUint(er.Value, 2, 8) + if err != nil { + continue + } + ers[i].Value = int(n) + } + } + return ers +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) + fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if 
strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} + +func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var afn addrFamilylNumbers + if err := dec.Decode(&afn); err != nil { + return err + } + afrs := afn.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated) + fmt.Fprintf(w, "const (\n") + for _, afr := range afrs { + if afr.Name == "" { + continue + } + fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value) + fmt.Fprintf(w, "// %s\n", afr.Descr) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type addrFamilylNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonAddrFamilyRecord struct { + Name string + Descr string + Value int +} + +func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord { + afrs := make([]canonAddrFamilyRecord, len(afn.Records)) + sr := strings.NewReplacer( + "IP version 4", "IPv4", + "IP version 6", "IPv6", + "Identifier", "ID", + "-", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, afr := range afn.Records { + if strings.Contains(afr.Descr, "Unassigned") || + strings.Contains(afr.Descr, "Reserved") { + continue + } + afrs[i].Descr = afr.Descr + s := strings.TrimSpace(afr.Descr) + switch s 
{ + case "IP (IP version 4)": + afrs[i].Name = "IPv4" + case "IP6 (IP version 6)": + afrs[i].Name = "IPv6" + case "AFI for L2VPN information": + afrs[i].Name = "L2VPN" + case "E.164 with NSAP format subaddress": + afrs[i].Name = "E164withSubaddress" + case "MT IP: Multi-Topology IP version 4": + afrs[i].Name = "MTIPv4" + case "MAC/24": + afrs[i].Name = "MACFinal24bits" + case "MAC/40": + afrs[i].Name = "MACFinal40bits" + case "IPv6/64": + afrs[i].Name = "IPv6Initial64bits" + default: + n := strings.Index(s, "(") + if n > 0 { + s = s[:n] + } + n = strings.Index(s, ":") + if n > 0 { + s = s[:n] + } + afrs[i].Name = sr.Replace(s) + } + afrs[i].Value, _ = strconv.Atoi(afr.Value) + } + return afrs +} diff --git a/vendor/golang.org/x/net/internal/socket/defs_aix.go b/vendor/golang.org/x/net/internal/socket/defs_aix.go new file mode 100644 index 0000000000..ae1b21c5e1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_aix.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 0000000000..b780bc67ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 0000000000..b780bc67ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 0000000000..b780bc67ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 0000000000..85bb7450b0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,40 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include +#include + +#define _GNU_SOURCE +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 0000000000..5bfdd4676c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,38 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 0000000000..b780bc67ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 0000000000..b780bc67ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/ipv4/defs_aix.go b/vendor/golang.org/x/net/ipv4/defs_aix.go new file mode 100644 index 0000000000..0f37211c64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_aix.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + // IP_RECVIF is defined on AIX but doesn't work. + // IP_RECVINTERFACE must be used instead. 
+ sysIP_RECVIF = C.IP_RECVINTERFACE + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 0000000000..c8f2e05b81 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = 
C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 0000000000..f30544ea24 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 0000000000..4dd57d8653 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type 
groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 0000000000..beb11071ad --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + 
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPFilter = C.sizeof_struct_icmp_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type sockExtendedErr C.struct_sock_extended_err + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpFilter C.struct_icmp_filter + +type sockFProg C.struct_sock_fprog + +type 
sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 0000000000..8f8af1b899 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 0000000000..8f8af1b899 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 0000000000..aeb33e9c8f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_NEXTHOP = C.IP_NEXTHOP + + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_REUSEADDR = C.IP_REUSEADDR + sysIP_DONTROUTE = C.IP_DONTROUTE + sysIP_BROADCAST = C.IP_BROADCAST + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type 
sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 0000000000..1bb1737f67 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + 
"https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr 
string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv6/defs_aix.go b/vendor/golang.org/x/net/ipv6/defs_aix.go new file mode 100644 index 0000000000..ea396a3cb8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_aix.go @@ -0,0 +1,82 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq 
C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 0000000000..55ddc116fc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + 
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 0000000000..27a1d1d6f7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,82 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter 
C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 0000000000..53e625389a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + 
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 0000000000..3308cb2c38 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + 
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + 
sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6FlowlabelReq C.struct_in6_flowlabel_req + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 0000000000..be9ceb9cc0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git 
a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go new file mode 100644 index 0000000000..177ddf87d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + 
sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 0000000000..0f8ce2b46a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,114 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK 
= C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 0000000000..5885664fbc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var 
icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := 
strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 0000000000..372ffbb24c --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,717 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. 
+ childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + numICANNRules = 0 + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + if len(rules) != 0 { + return fmt.Errorf(`expected no rules before "BEGIN ICANN DOMAINS"`) + } + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann, numICANNRules = false, len(rules) + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s 
== "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + 
return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nconst numICANNRules = %d\n\nvar rules = [...]string{\n", numICANNRules) + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. 
+// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. 
The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go new file mode 100644 index 0000000000..6f7bb6edfb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkasm_darwin.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. +//This program must be run after mksyscall.go. 
+package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "strings" +) + +func writeASMFile(in string, fileName string, buildTags string) { + trampolines := map[string]bool{} + + var out bytes.Buffer + + fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) + fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") + fmt.Fprintf(&out, "\n") + fmt.Fprintf(&out, "// +build %s\n", buildTags) + fmt.Fprintf(&out, "\n") + fmt.Fprintf(&out, "#include \"textflag.h\"\n") + for _, line := range strings.Split(in, "\n") { + if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { + continue + } + fn := line[5 : len(line)-13] + if !trampolines[fn] { + trampolines[fn] = true + fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) + fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) + } + } + err := ioutil.WriteFile(fileName, out.Bytes(), 0644) + if err != nil { + log.Fatalf("can't write %s: %s", fileName, err) + } +} + +func main() { + in1, err := ioutil.ReadFile("syscall_darwin.go") + if err != nil { + log.Fatalf("can't open syscall_darwin.go: %s", err) + } + arch := os.Args[1] + in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) + if err != nil { + log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) + } + in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) + if err != nil { + log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) + } + in := string(in1) + string(in2) + string(in3) + + writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12") + + in1, err = ioutil.ReadFile("syscall_darwin.1_13.go") + if err != nil { + log.Fatalf("can't open syscall_darwin.1_13.go: %s", err) + } + in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch)) + if err != nil { + log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err) + } + + in = string(in1) + string(in2) + + writeASMFile(in, 
fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13") +} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go new file mode 100644 index 0000000000..eb4332059a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -0,0 +1,122 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// mkpost processes the output of cgo -godefs to +// modify the generated types. It is used to clean up +// the sys API in an architecture specific manner. +// +// mkpost is run after cgo -godefs; see README.md. +package main + +import ( + "bytes" + "fmt" + "go/format" + "io/ioutil" + "log" + "os" + "regexp" +) + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check that we are using the Docker-based build system if we should be. + if goos == "linux" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") + os.Stderr.WriteString("See README.md\n") + os.Exit(1) + } + } + + b, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatal(err) + } + + if goos == "aix" { + // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t + // to avoid having both StTimespec and Timespec. 
+ sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) + b = sttimespec.ReplaceAll(b, []byte("Timespec")) + } + + // Intentionally export __val fields in Fsid and Sigset_t + valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) + b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) + + // Intentionally export __fds_bits field in FdSet + fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) + b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) + + // If we have empty Ptrace structs, we should delete them. Only s390x emits + // nonempty Ptrace structs. + ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) + b = ptraceRexexp.ReplaceAll(b, nil) + + // Replace the control_regs union with a blank identifier for now. + controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) + b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) + + // Remove fields that are added by glibc + // Note that this is unstable as the identifers are private. + removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + + // Convert [65]int8 to [65]byte in Utsname members to simplify + // conversion to string; see golang.org/issue/20753 + convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) + b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) + + // Convert [1024]int8 to [1024]byte in Ptmget members + convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) + b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) + + // Remove spare fields (e.g. 
in Statx_t) + spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) + b = spareFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove cgo padding fields + removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) + b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove padding, hidden, or unused fields + removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove the first line of warning from cgo + b = b[bytes.IndexByte(b, '\n')+1:] + // Modify the command in the header to include: + // mkpost, our own warning, and a build tag. + replacement := fmt.Sprintf(`$1 | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s,%s`, goarch, goos) + cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) + b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) + + // Rename Stat_t time fields + if goos == "freebsd" && goarch == "386" { + // Hide Stat_t.[AMCB]tim_ext fields + renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) + b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) + } + renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) + b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) + + // gofmt + b, err = format.Source(b) + if err != nil { + log.Fatal(err) + } + + os.Stdout.Write(b) +} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go new file mode 100644 index 0000000000..9e540cc892 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall.go @@ -0,0 +1,402 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_darwin.go) and generates system call bodies. 
+The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named errno. + +A line beginning with //sysnb is like //sys, except that the +goroutine will not be suspended during the execution of the system +call. This must only be used for system calls which can never +block, as otherwise the system call could cause all goroutines to +hang. +*/ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + plan9 = flag.Bool("plan9", false, "plan9") + openbsd = flag.Bool("openbsd", false, "openbsd") + netbsd = flag.Bool("netbsd", false, "netbsd") + dragonfly = flag.Bool("dragonfly", false, "dragonfly") + arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair + tags = flag.String("tags", "", "build tags") + filename = flag.String("output", "", "output file name (standard output if omitted)") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return 
regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + if goos == "" { + fmt.Fprintln(os.Stderr, "GOOS not defined in environment") + os.Exit(1) + } + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + + // Check that we are using the Docker-based build system if we should + if goos == "linux" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") + fmt.Fprintf(os.Stderr, "See README.md\n") + os.Exit(1) + } + } + + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + libc := false + if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) { + libc = true + } + trampolines := map[string]bool{} + + text := "" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, errno error) + // Split into name, in params, out 
params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, sysname := f[2], f[3], f[4], f[5] + + // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. + if goos == "darwin" && !libc && funct == "ClockGettime" { + continue + } + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // Go function header. + outDecl := "" + if len(out) > 0 { + outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + break + } + } + + // Prepare arguments to Syscall. 
+ var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass dummy pointer in that case. + // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
+ text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) + text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && (*openbsd || *netbsd) { + args = append(args, "0") + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if p.Type == "int64" && *dragonfly { + if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { + if len(args)%2 == 1 && *arm { + // arm abi specifies 64-bit argument uses + // (even, odd) pair + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + + // Determine which form to use; pad args with zeros. 
+ asm := "Syscall" + if nonblock != nil { + if errvar == "" && goos == "linux" { + asm = "RawSyscallNoError" + } else { + asm = "RawSyscall" + } + } else { + if errvar == "" && goos == "linux" { + asm = "SyscallNoError" + } + } + if len(args) <= 3 { + for len(args) < 3 { + args = append(args, "0") + } + } else if len(args) <= 6 { + asm += "6" + for len(args) < 6 { + args = append(args, "0") + } + } else if len(args) <= 9 { + asm += "9" + for len(args) < 9 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) + } + + // System call number. + if sysname == "" { + sysname = "SYS_" + funct + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToUpper(sysname) + } + + var libcFn string + if libc { + asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call + sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ + sysname = strings.ToLower(sysname) // lowercase + libcFn = sysname + sysname = "funcPC(libc_" + sysname + "_trampoline)" + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) + + // Assign return values. + body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" && !*plan9 { + reg = "e1" + ret[2] = reg + doErrno = true + } else if p.Name == "err" && *plan9 { + ret[0] = "r0" + ret[2] = "e1" + break + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%s != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. 
+ if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" || *plan9 { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + if errvar == "" && goos == "linux" { + // raw syscall without error on Linux, see golang.org/issue/22924 + text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + } + text += body + + if *plan9 && ret[2] == "e1" { + text += "\tif int32(r0) == -1 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } else if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = errnoErr(e1)\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n\n" + + if libc && !trampolines[libcFn] { + // some system calls share a trampoline, like read and readlen. + trampolines[libcFn] = true + // Declare assembly trampoline. + text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) + // Assembly trampoline calls the libc_* function, which this magic + // redirects to use the function from libSystem. + text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) + text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) + text += "\n" + } + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build %s + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go new file mode 100644 index 0000000000..3be3cdfc3b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go @@ -0,0 +1,415 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_aix.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. 
+ * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +*/ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + aix = flag.Bool("aix", false, "aix") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + text := "" + cExtern := "/*\n#include \n#include \n" + for _, path := range flag.Args() { + file, err := 
os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // Check if value return, err return available + errvar := "" + retvar := "" + rettype := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + } else { + retvar = p.Name + rettype = p.Type + } + } + + // System call name. + if sysname == "" { + sysname = funct + } + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToLower(sysname) // All libc functions are lowercase. 
+ + cRettype := "" + if rettype == "unsafe.Pointer" { + cRettype = "uintptr_t" + } else if rettype == "uintptr" { + cRettype = "uintptr_t" + } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { + cRettype = "uintptr_t" + } else if rettype == "int" { + cRettype = "int" + } else if rettype == "int32" { + cRettype = "int" + } else if rettype == "int64" { + cRettype = "long long" + } else if rettype == "uint32" { + cRettype = "unsigned int" + } else if rettype == "uint64" { + cRettype = "unsigned long long" + } else { + cRettype = "int" + } + if sysname == "exit" { + cRettype = "void" + } + + // Change p.Types to c + var cIn []string + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "string" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t", "size_t") + } else if p.Type == "unsafe.Pointer" { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "uintptr" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "int" { + cIn = append(cIn, "int") + } else if p.Type == "int32" { + cIn = append(cIn, "int") + } else if p.Type == "int64" { + cIn = append(cIn, "long long") + } else if p.Type == "uint32" { + cIn = append(cIn, "unsigned int") + } else if p.Type == "uint64" { + cIn = append(cIn, "unsigned long long") + } else { + cIn = append(cIn, "int") + } + } + + if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. 
+ cExtern += "#define c_select select\n" + } + // Imports of system calls from libc + cExtern += fmt.Sprintf("%s %s", cRettype, sysname) + cIn := strings.Join(cIn, ", ") + cExtern += fmt.Sprintf("(%s);\n", cIn) + } + + // So file name. + if *aix { + if modname == "" { + modname = "libc.a/shr_64.o" + } else { + fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) + os.Exit(1) + } + } + + strconvfunc := "C.CString" + + // Go function header. + if outps != "" { + outps = fmt.Sprintf(" (%s)", outps) + } + if text != "" { + text += "\n" + } + + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) + + // Prepare arguments to Syscall. + var args []string + n := 0 + argN := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. 
+ text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) + n++ + text += fmt.Sprintf("\tvar _p%d int\n", n) + text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) + args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) + n++ + } else if p.Type == "int64" && endianness != "" { + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + n++ + } else if p.Type == "bool" { + text += fmt.Sprintf("\tvar _p%d uint32\n", n) + text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) + args = append(args, fmt.Sprintf("_p%d", n)) + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { + args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) + } else if p.Type == "unsafe.Pointer" { + args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) + } else if p.Type == "int" { + if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { + args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) + } else if argN == 0 && funct == "fcntl" { + args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { + args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) + } + } else if p.Type == "int32" { + args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) + } else if p.Type == "int64" { + args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) + } else if p.Type == "uint32" { + args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) + } else if p.Type == "uint64" { + args = append(args, 
fmt.Sprintf("C.ulonglong(%s)", p.Name)) + } else if p.Type == "uintptr" { + args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) + } + argN++ + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := "" + if sysname == "exit" { + if errvar != "" { + call += "er :=" + } else { + call += "" + } + } else if errvar != "" { + call += "r0,er :=" + } else if retvar != "" { + call += "r0,_ :=" + } else { + call += "" + } + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) + } else { + call += fmt.Sprintf("C.%s(%s)", sysname, arglist) + } + + // Assign return values. + body := "" + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + } else { + reg = "r0" + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + + // verify return + if sysname != "exit" && errvar != "" { + if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { + body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" + body += fmt.Sprintf("\t\t%s = er\n", errvar) + body += "\t}\n" + } else { + body += "\tif (r0 ==-1 && er != nil) {\n" + body += fmt.Sprintf("\t\t%s = er\n", errvar) + body += "\t}\n" + } + } else if errvar != "" { + body += "\tif (er != nil) {\n" + body += fmt.Sprintf("\t\t%s = er\n", errvar) + body += "\t}\n" + } + + text += fmt.Sprintf("\t%s\n", call) + text += body + + text += "\treturn\n" + text += "}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build %s + +package %s + + +%s +*/ +import "C" +import ( + "unsafe" +) + + +%s + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go new file mode 100644 index 0000000000..c960099517 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go @@ -0,0 +1,614 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_aix.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. + * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt + + +This program will generate three files and handle both gc and gccgo implementation: + - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) + - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 + - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. + + The generated code looks like this + +zsyscall_aix_ppc64.go +func asyscall(...) (n int, err error) { + // Pointer Creation + r1, e1 := callasyscall(...) 
+ // Type Conversion + // Error Handler + return +} + +zsyscall_aix_ppc64_gc.go +//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" +//go:linkname libc_asyscall libc_asyscall +var asyscall syscallFunc + +func callasyscall(...) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) + return +} + +zsyscall_aix_ppc64_ggcgo.go + +// int asyscall(...) + +import "C" + +func callasyscall(...) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.asyscall(...)) + e1 = syscall.GetErrno() + return +} +*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "io/ioutil" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + aix = flag.Bool("aix", false, "aix") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + 
fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + // GCCGO + textgccgo := "" + cExtern := "/*\n#include \n" + // GC + textgc := "" + dynimports := "" + linknames := "" + var vars []string + // COMMON + textcommon := "" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + if sysname == "" { + sysname = funct + } + + onlyCommon := false + if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { + // This function call another syscall which is already implemented. + // Therefore, the gc and gccgo part must not be generated. + onlyCommon = true + } + + // Try in vain to keep people from editing this file. 
+ // The theory is that they jump into the middle of the file + // without reading the header. + + textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + if !onlyCommon { + textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + } + + // Check if value return, err return available + errvar := "" + rettype := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + } else { + rettype = p.Type + } + } + + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToLower(sysname) // All libc functions are lowercase. + + // GCCGO Prototype return type + cRettype := "" + if rettype == "unsafe.Pointer" { + cRettype = "uintptr_t" + } else if rettype == "uintptr" { + cRettype = "uintptr_t" + } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { + cRettype = "uintptr_t" + } else if rettype == "int" { + cRettype = "int" + } else if rettype == "int32" { + cRettype = "int" + } else if rettype == "int64" { + cRettype = "long long" + } else if rettype == "uint32" { + cRettype = "unsigned int" + } else if rettype == "uint64" { + cRettype = "unsigned long long" + } else { + cRettype = "int" + } + if sysname == "exit" { + cRettype = "void" + } + + // GCCGO Prototype arguments type + var cIn []string + for i, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "string" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t", "size_t") + } else if p.Type == "unsafe.Pointer" { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "uintptr" { + cIn = append(cIn, "uintptr_t") + } else if 
regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "int" { + if (i == 0 || i == 2) && funct == "fcntl" { + // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock + cIn = append(cIn, "uintptr_t") + } else { + cIn = append(cIn, "int") + } + + } else if p.Type == "int32" { + cIn = append(cIn, "int") + } else if p.Type == "int64" { + cIn = append(cIn, "long long") + } else if p.Type == "uint32" { + cIn = append(cIn, "unsigned int") + } else if p.Type == "uint64" { + cIn = append(cIn, "unsigned long long") + } else { + cIn = append(cIn, "int") + } + } + + if !onlyCommon { + // GCCGO Prototype Generation + // Imports of system calls from libc + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + cExtern += "#define c_select select\n" + } + cExtern += fmt.Sprintf("%s %s", cRettype, sysname) + cIn := strings.Join(cIn, ", ") + cExtern += fmt.Sprintf("(%s);\n", cIn) + } + // GC Library name + if modname == "" { + modname = "libc.a/shr_64.o" + } else { + fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) + os.Exit(1) + } + sysvarname := fmt.Sprintf("libc_%s", sysname) + + if !onlyCommon { + // GC Runtime import of function to allow cross-platform builds. + dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) + // GC Link symbol to proc address variable. + linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) + // GC Library proc address variable. + vars = append(vars, sysvarname) + } + + strconvfunc := "BytePtrFromString" + strconvtype := "*byte" + + // Go function header. + if outps != "" { + outps = fmt.Sprintf(" (%s)", outps) + } + if textcommon != "" { + textcommon += "\n" + } + + textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) + + // Prepare arguments tocall. 
+ var argscommon []string // Arguments in the common part + var argscall []string // Arguments for call prototype + var argsgc []string // Arguments for gc call (with syscall6) + var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) + n := 0 + argN := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else if p.Type == "string" && errvar != "" { + textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) + argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) + argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { + // Convert slice into pointer, length. 
+ // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. + textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) + textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) + argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) + n++ + } else if p.Type == "int64" && endianness != "" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") + } else if p.Type == "bool" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n") + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { + argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else if p.Type == "int" { + if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { + // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock + argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + + } else { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) + } + } else if p.Type == 
"int32" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) + } else if p.Type == "int64" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) + } else if p.Type == "uint32" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) + } else if p.Type == "uint64" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) + } else if p.Type == "uintptr" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else { + argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) + } + argN++ + } + nargs := len(argsgc) + + // COMMON function generation + argscommonlist := strings.Join(argscommon, ", ") + callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) + ret := []string{"_", "_"} + body := "" + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + ret[1] = reg + doErrno = true + } else { + reg = "r0" + ret[0] = reg + 
} + if p.Type == "bool" { + reg = fmt.Sprintf("%s != 0", reg) + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" { + textcommon += fmt.Sprintf("\t%s\n", callcommon) + } else { + textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) + } + textcommon += body + + if doErrno { + textcommon += "\tif e1 != 0 {\n" + textcommon += "\t\terr = errnoErr(e1)\n" + textcommon += "\t}\n" + } + textcommon += "\treturn\n" + textcommon += "}\n" + + if onlyCommon { + continue + } + + // CALL Prototype + callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) + + // GC function generation + asm := "syscall6" + if nonblock != nil { + asm = "rawSyscall6" + } + + if len(argsgc) <= 6 { + for len(argsgc) < 6 { + argsgc = append(argsgc, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) + os.Exit(1) + } + argsgclist := strings.Join(argsgc, ", ") + callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) + + textgc += callProto + textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) + textgc += "\treturn\n}\n" + + // GCCGO function generation + argsgccgolist := strings.Join(argsgccgo, ", ") + var callgccgo string + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. 
+ callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) + } else { + callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) + } + textgccgo += callProto + textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) + textgccgo += "\te1 = syscall.GetErrno()\n" + textgccgo += "\treturn\n}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + + // Print zsyscall_aix_ppc64.go + err := ioutil.WriteFile("zsyscall_aix_ppc64.go", + []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), + 0644) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + + // Print zsyscall_aix_ppc64_gc.go + vardecls := "\t" + strings.Join(vars, ",\n\t") + vardecls += " syscallFunc" + err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", + []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), + 0644) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + + // Print zsyscall_aix_ppc64_gccgo.go + err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", + []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), + 0644) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } +} + +const srcTemplate1 = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package %s + +import ( + "unsafe" +) + + +%s + +%s +` +const srcTemplate2 = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s +// +build !gccgo + +package %s + +import ( + "unsafe" +) +%s +%s +%s +type syscallFunc uintptr + +var ( +%s +) + +// Implemented in runtime/syscall_aix.go. 
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +%s +` +const srcTemplate3 = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s +// +build gccgo + +package %s + +%s +*/ +import "C" +import ( + "syscall" +) + + +%s + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go new file mode 100644 index 0000000000..3d864738b6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go @@ -0,0 +1,335 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* + This program reads a file containing function prototypes + (like syscall_solaris.go) and generates system call bodies. + The prototypes are marked by lines beginning with "//sys" + and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. 
+ * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + text := "" + dynimports := "" + linknames := "" + var vars []string + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { 
+ fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // So file name. + if modname == "" { + modname = "libc" + } + + // System call name. + if sysname == "" { + sysname = funct + } + + // System call pointer variable name. + sysvarname := fmt.Sprintf("proc%s", sysname) + + strconvfunc := "BytePtrFromString" + strconvtype := "*byte" + + sysname = strings.ToLower(sysname) // All libc functions are lowercase. + + // Runtime import of function to allow cross-platform builds. + dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) + // Link symbol to proc address variable. 
+ linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) + // Library proc address variable. + vars = append(vars, sysvarname) + + // Go function header. + outlist := strings.Join(out, ", ") + if outlist != "" { + outlist = fmt.Sprintf(" (%s)", outlist) + } + if text != "" { + text += "\n" + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + continue + } + } + + // Prepare arguments to Syscall. + var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. 
+ text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && endianness != "" { + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else if p.Type == "bool" { + text += fmt.Sprintf("\tvar _p%d uint32\n", n) + text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) + n++ + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + nargs := len(args) + + // Determine which form to use; pad args with zeros. + asm := "sysvicall6" + if nonblock != nil { + asm = "rawSysvicall6" + } + if len(args) <= 6 { + for len(args) < 6 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) + os.Exit(1) + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) + + // Assign return values. + body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + ret[2] = reg + doErrno = true + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%d != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. 
+ if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) + os.Exit(1) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + text += body + + if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + vardecls := "\t" + strings.Join(vars, ",\n\t") + vardecls += " syscallFunc" + fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package %s + +import ( + "syscall" + "unsafe" +) +%s +%s +%s +var ( +%s +) + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go new file mode 100644 index 0000000000..b6b409909c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go @@ -0,0 +1,355 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. 
+// +// Build a MIB with each entry being an array containing the level, type and +// a hash that will contain additional entries if the current entry is a node. +// We then walk this MIB and create a flattened sysctl name to OID hash. + +package main + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" +) + +var ( + goos, goarch string +) + +// cmdLine returns this programs's commandline arguments. +func cmdLine() string { + return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags. +func buildTags() string { + return fmt.Sprintf("%s,%s", goarch, goos) +} + +// reMatch performs regular expression match and stores the substring slice to value pointed by m. +func reMatch(re *regexp.Regexp, str string, m *[]string) bool { + *m = re.FindStringSubmatch(str) + if *m != nil { + return true + } + return false +} + +type nodeElement struct { + n int + t string + pE *map[string]nodeElement +} + +var ( + debugEnabled bool + mib map[string]nodeElement + node *map[string]nodeElement + nodeMap map[string]string + sysCtl []string +) + +var ( + ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) + ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) + ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) + netInetRE = regexp.MustCompile(`^netinet/`) + netInet6RE = regexp.MustCompile(`^netinet6/`) + netRE = regexp.MustCompile(`^net/`) + bracesRE = regexp.MustCompile(`{.*}`) + ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) + fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) +) + +func debug(s string) { + if debugEnabled { + fmt.Fprintln(os.Stderr, s) + } +} + +// Walk the MIB and build a sysctl name to OID mapping. 
+func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { + lNode := pNode // local copy of pointer to node + var keys []string + for k := range *lNode { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + nodename := name + if name != "" { + nodename += "." + } + nodename += key + + nodeoid := append(oid, (*pNode)[key].n) + + if (*pNode)[key].t == `CTLTYPE_NODE` { + if _, ok := nodeMap[nodename]; ok { + lNode = &mib + ctlName := nodeMap[nodename] + for _, part := range strings.Split(ctlName, ".") { + lNode = ((*lNode)[part]).pE + } + } else { + lNode = (*pNode)[key].pE + } + buildSysctl(lNode, nodename, nodeoid) + } else if (*pNode)[key].t != "" { + oidStr := []string{} + for j := range nodeoid { + oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) + } + text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" + sysCtl = append(sysCtl, text) + } + } +} + +func main() { + // Get the OS (using GOOS_TARGET if it exist) + goos = os.Getenv("GOOS_TARGET") + if goos == "" { + goos = os.Getenv("GOOS") + } + // Get the architecture (using GOARCH_TARGET if it exists) + goarch = os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check if GOOS and GOARCH environment variables are defined + if goarch == "" || goos == "" { + fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") + os.Exit(1) + } + + mib = make(map[string]nodeElement) + headers := [...]string{ + `sys/sysctl.h`, + `sys/socket.h`, + `sys/tty.h`, + `sys/malloc.h`, + `sys/mount.h`, + `sys/namei.h`, + `sys/sem.h`, + `sys/shm.h`, + `sys/vmmeter.h`, + `uvm/uvmexp.h`, + `uvm/uvm_param.h`, + `uvm/uvm_swap_encrypt.h`, + `ddb/db_var.h`, + `net/if.h`, + `net/if_pfsync.h`, + `net/pipex.h`, + `netinet/in.h`, + `netinet/icmp_var.h`, + `netinet/igmp_var.h`, + `netinet/ip_ah.h`, + `netinet/ip_carp.h`, + `netinet/ip_divert.h`, + `netinet/ip_esp.h`, + `netinet/ip_ether.h`, + `netinet/ip_gre.h`, + 
`netinet/ip_ipcomp.h`, + `netinet/ip_ipip.h`, + `netinet/pim_var.h`, + `netinet/tcp_var.h`, + `netinet/udp_var.h`, + `netinet6/in6.h`, + `netinet6/ip6_divert.h`, + `netinet6/pim6_var.h`, + `netinet/icmp6.h`, + `netmpls/mpls.h`, + } + + ctls := [...]string{ + `kern`, + `vm`, + `fs`, + `net`, + //debug /* Special handling required */ + `hw`, + //machdep /* Arch specific */ + `user`, + `ddb`, + //vfs /* Special handling required */ + `fs.posix`, + `kern.forkstat`, + `kern.intrcnt`, + `kern.malloc`, + `kern.nchstats`, + `kern.seminfo`, + `kern.shminfo`, + `kern.timecounter`, + `kern.tty`, + `kern.watchdog`, + `net.bpf`, + `net.ifq`, + `net.inet`, + `net.inet.ah`, + `net.inet.carp`, + `net.inet.divert`, + `net.inet.esp`, + `net.inet.etherip`, + `net.inet.gre`, + `net.inet.icmp`, + `net.inet.igmp`, + `net.inet.ip`, + `net.inet.ip.ifq`, + `net.inet.ipcomp`, + `net.inet.ipip`, + `net.inet.mobileip`, + `net.inet.pfsync`, + `net.inet.pim`, + `net.inet.tcp`, + `net.inet.udp`, + `net.inet6`, + `net.inet6.divert`, + `net.inet6.ip6`, + `net.inet6.icmp6`, + `net.inet6.pim6`, + `net.inet6.tcp6`, + `net.inet6.udp6`, + `net.mpls`, + `net.mpls.ifq`, + `net.key`, + `net.pflow`, + `net.pfsync`, + `net.pipex`, + `net.rt`, + `vm.swapencrypt`, + //vfsgenctl /* Special handling required */ + } + + // Node name "fixups" + ctlMap := map[string]string{ + "ipproto": "net.inet", + "net.inet.ipproto": "net.inet", + "net.inet6.ipv6proto": "net.inet6", + "net.inet6.ipv6": "net.inet6.ip6", + "net.inet.icmpv6": "net.inet6.icmp6", + "net.inet6.divert6": "net.inet6.divert", + "net.inet6.tcp6": "net.inet.tcp", + "net.inet6.udp6": "net.inet.udp", + "mpls": "net.mpls", + "swpenc": "vm.swapencrypt", + } + + // Node mappings + nodeMap = map[string]string{ + "net.inet.ip.ifq": "net.ifq", + "net.inet.pfsync": "net.pfsync", + "net.mpls.ifq": "net.ifq", + } + + mCtls := make(map[string]bool) + for _, ctl := range ctls { + mCtls[ctl] = true + } + + for _, header := range headers { + debug("Processing " + 
header) + file, err := os.Open(filepath.Join("/usr/include", header)) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + var sub []string + if reMatch(ctlNames1RE, s.Text(), &sub) || + reMatch(ctlNames2RE, s.Text(), &sub) || + reMatch(ctlNames3RE, s.Text(), &sub) { + if sub[1] == `CTL_NAMES` { + // Top level. + node = &mib + } else { + // Node. + nodename := strings.ToLower(sub[2]) + ctlName := "" + if reMatch(netInetRE, header, &sub) { + ctlName = "net.inet." + nodename + } else if reMatch(netInet6RE, header, &sub) { + ctlName = "net.inet6." + nodename + } else if reMatch(netRE, header, &sub) { + ctlName = "net." + nodename + } else { + ctlName = nodename + ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) + } + + if val, ok := ctlMap[ctlName]; ok { + ctlName = val + } + if _, ok := mCtls[ctlName]; !ok { + debug("Ignoring " + ctlName + "...") + continue + } + + // Walk down from the top of the MIB. + node = &mib + for _, part := range strings.Split(ctlName, ".") { + if _, ok := (*node)[part]; !ok { + debug("Missing node " + part) + (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} + } + node = (*node)[part].pE + } + } + + // Populate current node with entries. + i := -1 + for !strings.HasPrefix(s.Text(), "}") { + s.Scan() + if reMatch(bracesRE, s.Text(), &sub) { + i++ + } + if !reMatch(ctlTypeRE, s.Text(), &sub) { + continue + } + (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} + } + } + } + err = s.Err() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + file.Close() + } + buildSysctl(&mib, "", []int{}) + + sort.Strings(sysCtl) + text := strings.Join(sysCtl, "") + + fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +} + +const srcTemplate = `// %s +// Code generated by the command above; DO NOT EDIT. 
+ +// +build %s + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry { +%s +} +` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go new file mode 100644 index 0000000000..baa6ecd850 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksysnum.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Generate system call table for DragonFly, NetBSD, +// FreeBSD, OpenBSD or Darwin from master list +// (for example, /usr/src/sys/kern/syscalls.master or +// sys/syscall.h). +package main + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "strings" +) + +var ( + goos, goarch string +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return fmt.Sprintf("%s,%s", goarch, goos) +} + +func checkErr(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} + +// source string and substring slice for regexp +type re struct { + str string // source string + sub []string // matched sub-string +} + +// Match performs regular expression match +func (r *re) Match(exp string) bool { + r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) + if r.sub != nil { + return true + } + return false +} + +// fetchFile fetches a text file from URL +func fetchFile(URL string) io.Reader { + resp, err := http.Get(URL) + checkErr(err) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + checkErr(err) + return strings.NewReader(string(body)) +} + +// readFile reads a text file from path +func readFile(path string) io.Reader { + file, err := os.Open(os.Args[1]) + checkErr(err) + return file +} + +func 
format(name, num, proto string) string { + name = strings.ToUpper(name) + // There are multiple entries for enosys and nosys, so comment them out. + nm := re{str: name} + if nm.Match(`^SYS_E?NOSYS$`) { + name = fmt.Sprintf("// %s", name) + } + if name == `SYS_SYS_EXIT` { + name = `SYS_EXIT` + } + return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) +} + +func main() { + // Get the OS (using GOOS_TARGET if it exist) + goos = os.Getenv("GOOS_TARGET") + if goos == "" { + goos = os.Getenv("GOOS") + } + // Get the architecture (using GOARCH_TARGET if it exists) + goarch = os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check if GOOS and GOARCH environment variables are defined + if goarch == "" || goos == "" { + fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") + os.Exit(1) + } + + file := strings.TrimSpace(os.Args[1]) + var syscalls io.Reader + if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { + // Download syscalls.master file + syscalls = fetchFile(file) + } else { + syscalls = readFile(file) + } + + var text, line string + s := bufio.NewScanner(syscalls) + for s.Scan() { + t := re{str: line} + if t.Match(`^(.*)\\$`) { + // Handle continuation + line = t.sub[1] + line += strings.TrimLeft(s.Text(), " \t") + } else { + // New line + line = s.Text() + } + t = re{str: line} + if t.Match(`\\$`) { + continue + } + t = re{str: line} + + switch goos { + case "dragonfly": + if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { + num, proto := t.sub[1], t.sub[2] + name := fmt.Sprintf("SYS_%s", t.sub[3]) + text += format(name, num, proto) + } + case "freebsd": + if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { + num, proto := t.sub[1], t.sub[2] + name := fmt.Sprintf("SYS_%s", t.sub[3]) + text += format(name, num, proto) + } + case "openbsd": + if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { + num, proto, name := t.sub[1], t.sub[3], 
t.sub[4] + text += format(name, num, proto) + } + case "netbsd": + if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { + num, proto, compat := t.sub[1], t.sub[6], t.sub[8] + name := t.sub[7] + "_" + t.sub[9] + if t.sub[11] != "" { + name = t.sub[7] + "_" + t.sub[11] + } + name = strings.ToUpper(name) + if compat == "" || compat == "13" || compat == "30" || compat == "50" { + text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) + } + } + case "darwin": + if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { + name, num := t.sub[1], t.sub[2] + name = strings.ToUpper(name) + text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) + } + default: + fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) + os.Exit(1) + + } + } + err := s.Err() + checkErr(err) + + fmt.Printf(template, cmdLine(), buildTags(), text) +} + +const template = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package unix + +const( +%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go new file mode 100644 index 0000000000..40d2beede5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_aix.go @@ -0,0 +1,237 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore +// +build aix + +/* +Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + + +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +type off64 C.off64_t +type off C.off_t +type Mode_t C.mode_t + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Timex C.struct_timex + +type Time_t C.time_t + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +type Timezone C.struct_timezone + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit64 + +type Pid_t C.pid_t + +type _Gid_t C.gid_t + +type dev_t C.dev_t + +// Files + +type Stat_t C.struct_stat + +type StatxTimestamp C.struct_statx_timestamp + +type Statx_t C.struct_statx + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type 
_Socklen C.socklen_t + +type Cmsghdr C.struct_cmsghdr + +type ICMPv6Filter C.struct_icmp6_filter + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type Linger C.struct_linger + +type Msghdr C.struct_msghdr + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr +) + +type IfMsgHdr C.struct_if_msghdr + +// Misc + +type FdSet C.fd_set + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +type Sigset_t C.sigset_t + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize + +//poll + +type PollFd struct { + Fd int32 + Events uint16 + Revents uint16 +} + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +//flock_t + +type Flock_t C.struct_flock64 + +// Statfs + +type Fsid_t C.struct_fsid_t +type Fsid64_t C.struct_fsid64_t + +type Statfs_t C.struct_statfs + +const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go 
b/vendor/golang.org/x/sys/unix/types_darwin.go new file mode 100644 index 0000000000..155c2e692b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -0,0 +1,283 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define __DARWIN_UNIX03 0 +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat64 + +type Statfs_t C.struct_statfs64 + +type Flock_t C.struct_flock + +type Fstore_t C.struct_fstore + +type 
Radvisory_t C.struct_radvisory + +type Fbootstraptransfer_t C.struct_fbootstraptransfer + +type Log2phys_t C.struct_log2phys + +type Fsid C.struct_fsid + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet4Pktinfo C.struct_in_pktinfo + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + 
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfmaMsghdr2 C.struct_ifma_msghdr2 + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// uname + +type Utsname C.struct_utsname + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go new file mode 100644 index 0000000000..3365dd79d0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -0,0 +1,263 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.struct_fsid + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq 
C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + 
SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go new file mode 100644 index 0000000000..a121dc3368 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -0,0 +1,400 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. 
See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define _WANT_FREEBSD11_STAT 1 +#define _WANT_FREEBSD11_STATFS 1 +#define _WANT_FREEBSD11_DIRENT 1 +#define _WANT_FREEBSD11_KEVENT 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +// This structure is a duplicate of if_data on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. +struct if_data8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; +// FIXME: these are now unions, so maybe need to change definitions? +#undef ifi_epoch + time_t ifi_epoch; +#undef ifi_lastchange + struct timeval ifi_lastchange; +}; + +// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. 
+struct if_msghdr8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data8 ifm_data; +}; +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( + _statfsVersion = C.STATFS_VERSION + _dirblksiz = C.DIRBLKSIZ +) + +type Stat_t C.struct_stat + +type stat_freebsd11_t C.struct_freebsd11_stat + +type Statfs_t C.struct_statfs + +type statfs_freebsd11_t C.struct_freebsd11_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type dirent_freebsd11 C.struct_freebsd11_dirent + +type Fsid C.struct_fsid + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPMreqn C.struct_ip_mreqn + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo 
C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPMreqn = C.sizeof_struct_ip_mreqn + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_ATTACH = C.PT_ATTACH + PTRACE_CONT = C.PT_CONTINUE + PTRACE_DETACH = C.PT_DETACH + PTRACE_GETFPREGS = C.PT_GETFPREGS + PTRACE_GETFSBASE = C.PT_GETFSBASE + PTRACE_GETLWPLIST = C.PT_GETLWPLIST + PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS + PTRACE_GETREGS = C.PT_GETREGS + PTRACE_GETXSTATE = C.PT_GETXSTATE + PTRACE_IO = C.PT_IO + PTRACE_KILL = C.PT_KILL + PTRACE_LWPEVENTS = C.PT_LWP_EVENTS + PTRACE_LWPINFO = C.PT_LWPINFO + PTRACE_SETFPREGS = C.PT_SETFPREGS + PTRACE_SETREGS = C.PT_SETREGS + PTRACE_SINGLESTEP = C.PT_STEP + PTRACE_TRACEME = C.PT_TRACE_ME +) + +const ( + PIOD_READ_D = C.PIOD_READ_D + PIOD_WRITE_D = C.PIOD_WRITE_D + PIOD_READ_I = C.PIOD_READ_I + PIOD_WRITE_I = C.PIOD_WRITE_I +) + +const ( + PL_FLAG_BORN = C.PL_FLAG_BORN + PL_FLAG_EXITED = C.PL_FLAG_EXITED + PL_FLAG_SI = C.PL_FLAG_SI +) + +const ( + TRAP_BRKPT = C.TRAP_BRKPT + TRAP_TRACE = C.TRAP_TRACE +) + +type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo + +type __Siginfo C.struct___siginfo + +type Sigset_t C.sigset_t + +type Reg C.struct_reg + +type FpReg C.struct_fpreg + +type PtraceIoDesc C.struct_ptrace_io_desc + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent_freebsd11 + +// Select + +type FdSet C.fd_set + +// Routing and 
interface messages + +const ( + sizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 + sizeofIfData = C.sizeof_struct_if_data + SizeofIfData = C.sizeof_struct_if_data8 + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type ifMsghdr C.struct_if_msghdr + +type IfMsghdr C.struct_if_msghdr8 + +type ifData C.struct_if_data + +type IfData C.struct_if_data8 + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr + SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfZbuf C.struct_bpf_zbuf + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfZbufHeader C.struct_bpf_zbuf_header + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. 
+ +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLINIGNEOF = C.POLLINIGNEOF + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Capabilities + +type CapRights C.struct_cap_rights + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go new file mode 100644 index 0000000000..4a96d72c37 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -0,0 +1,290 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. 
See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type 
RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = 
C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +type Ptmget C.struct_ptmget + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Sysctl + +type Sysctlnode C.struct_sysctlnode + +// Uname + +type Utsname C.struct_utsname + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go new file mode 100644 index 0000000000..775cb57dc8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -0,0 +1,283 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. 
See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq 
C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat 
C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Signal Sets + +type Sigset_t C.sigset_t + +// Uname + +type Utsname C.struct_utsname + +// Uvmexp + +const SizeofUvmexp = C.sizeof_struct_uvmexp + +type Uvmexp C.struct_uvmexp + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go new file mode 100644 index 0000000000..2b716f9348 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -0,0 +1,266 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +// These defines ensure that builds done on newer versions of Solaris are +// backwards-compatible with older versions of Solaris and +// OpenSolaris-based derivatives. 
+#define __USE_SUNOS_SOCKETS__ // msghdr +#define __USE_LEGACY_PROTOTYPES__ // iovec +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX + MaxHostNameLen = C.MAXHOSTNAMELEN +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +// Filesystems + +type _Fsblkcnt_t C.fsblkcnt_t + +type Statvfs_t C.struct_statvfs + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec 
C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Select + +type FdSet C.fd_set + +// Misc + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_EACCESS = C.AT_EACCESS +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat 
C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfTimeval C.struct_bpf_timeval + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go new file mode 100644 index 0000000000..987fc169cc --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen.go @@ -0,0 +1,133 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" +) + +var outputFile = flag.String("out", "tables.go", "output file") + +func main() { + gen.Init() + gen.Repackage("gen_trieval.go", "trieval.go", "bidi") + gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") + + genTables() +} + +// bidiClass names and codes taken from class "bc" in +// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt +var bidiClass = map[string]Class{ + "AL": AL, // ArabicLetter + "AN": AN, // ArabicNumber + "B": B, // ParagraphSeparator + "BN": BN, // BoundaryNeutral + "CS": CS, // CommonSeparator + "EN": EN, // EuropeanNumber + "ES": ES, // EuropeanSeparator + "ET": ET, // EuropeanTerminator + "L": L, // LeftToRight + "NSM": NSM, // NonspacingMark + "ON": ON, // OtherNeutral + "R": R, // RightToLeft + "S": S, // SegmentSeparator + "WS": WS, // WhiteSpace + + "FSI": Control, + "PDF": 
Control, + "PDI": Control, + "LRE": Control, + "LRI": Control, + "LRO": Control, + "RLE": Control, + "RLI": Control, + "RLO": Control, +} + +func genTables() { + if numClass > 0x0F { + log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) + } + w := gen.NewCodeWriter() + defer w.WriteVersionedGoFile(*outputFile, "bidi") + + gen.WriteUnicodeVersion(w) + + t := triegen.NewTrie("bidi") + + // Build data about bracket mapping. These bits need to be or-ed with + // any other bits. + orMask := map[rune]uint64{} + + xorMap := map[rune]int{} + xorMasks := []rune{0} // First value is no-op. + + ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { + r1 := p.Rune(0) + r2 := p.Rune(1) + xor := r1 ^ r2 + if _, ok := xorMap[xor]; !ok { + xorMap[xor] = len(xorMasks) + xorMasks = append(xorMasks, xor) + } + entry := uint64(xorMap[xor]) << xorMaskShift + switch p.String(2) { + case "o": + entry |= openMask + case "c", "n": + default: + log.Fatalf("Unknown bracket class %q.", p.String(2)) + } + orMask[r1] = entry + }) + + w.WriteComment(` + xorMasks contains masks to be xor-ed with brackets to get the reverse + version.`) + w.WriteVar("xorMasks", xorMasks) + + done := map[rune]bool{} + + insert := func(r rune, c Class) { + if !done[r] { + t.Insert(r, orMask[r]|uint64(c)) + done[r] = true + } + } + + // Insert the derived BiDi properties. + ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + class, ok := bidiClass[p.String(1)] + if !ok { + log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) + } + insert(r, class) + }) + visitDefaults(insert) + + // TODO: use sparse blocks. This would reduce table size considerably + // from the looks of it. + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} + +// dummy values to make methods in gen_common compile. The real versions +// will be generated by this file to tables.go. 
+var ( + xorMasks []rune +) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go new file mode 100644 index 0000000000..02c3b505d6 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "unicode" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/rangetable" +) + +// These tables are hand-extracted from: +// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt +func visitDefaults(fn func(r rune, c Class)) { + // first write default values for ranges listed above. + visitRunes(fn, AL, []rune{ + 0x0600, 0x07BF, // Arabic + 0x08A0, 0x08FF, // Arabic Extended-A + 0xFB50, 0xFDCF, // Arabic Presentation Forms + 0xFDF0, 0xFDFF, + 0xFE70, 0xFEFF, + 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols + }) + visitRunes(fn, R, []rune{ + 0x0590, 0x05FF, // Hebrew + 0x07C0, 0x089F, // Nko et al. + 0xFB1D, 0xFB4F, + 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. 
+ 0x0001E800, 0x0001EDFF, + 0x0001EF00, 0x0001EFFF, + }) + visitRunes(fn, ET, []rune{ // European Terminator + 0x20A0, 0x20Cf, // Currency symbols + }) + rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { + fn(r, BN) // Boundary Neutral + }) + ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { + if p.String(1) == "Default_Ignorable_Code_Point" { + fn(p.Rune(0), BN) // Boundary Neutral + } + }) +} + +func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { + for i := 0; i < len(runes); i += 2 { + lo, hi := runes[i], runes[i+1] + for j := lo; j <= hi; j++ { + fn(j, c) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go new file mode 100644 index 0000000000..9cb9942894 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go @@ -0,0 +1,64 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// Class is the Unicode BiDi class. Each rune has a single class. 
+type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go new file mode 100644 index 0000000000..30a3aa9334 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/maketables.go @@ -0,0 +1,986 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Normalization table generator. +// Data read from the web. +// See forminfo.go for a description of the trie values associated with each rune. 
+ +package main + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "log" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" +) + +func main() { + gen.Init() + loadUnicodeData() + compactCCC() + loadCompositionExclusions() + completeCharFields(FCanonical) + completeCharFields(FCompatibility) + computeNonStarterCounts() + verifyComputed() + printChars() + testDerived() + printTestdata() + makeTables() +} + +var ( + tablelist = flag.String("tables", + "all", + "comma-separated list of which tables to generate; "+ + "can be 'decomp', 'recomp', 'info' and 'all'") + test = flag.Bool("test", + false, + "test existing tables against DerivedNormalizationProps and generate test data for regression testing") + verbose = flag.Bool("verbose", + false, + "write data to stdout as it is parsed") +) + +const MaxChar = 0x10FFFF // anything above this shouldn't exist + +// Quick Check properties of runes allow us to quickly +// determine whether a rune may occur in a normal form. +// For a given normal form, a rune may be guaranteed to occur +// verbatim (QC=Yes), may or may not combine with another +// rune (QC=Maybe), or may not occur (QC=No). +type QCResult int + +const ( + QCUnknown QCResult = iota + QCYes + QCNo + QCMaybe +) + +func (r QCResult) String() string { + switch r { + case QCYes: + return "Yes" + case QCNo: + return "No" + case QCMaybe: + return "Maybe" + } + return "***UNKNOWN***" +} + +const ( + FCanonical = iota // NFC or NFD + FCompatibility // NFKC or NFKD + FNumberOfFormTypes +) + +const ( + MComposed = iota // NFC or NFKC + MDecomposed // NFD or NFKD + MNumberOfModes +) + +// This contains only the properties we're interested in. +type Char struct { + name string + codePoint rune // if zero, this index is not a valid code point. 
+ ccc uint8 // canonical combining class + origCCC uint8 + excludeInComp bool // from CompositionExclusions.txt + compatDecomp bool // it has a compatibility expansion + + nTrailingNonStarters uint8 + nLeadingNonStarters uint8 // must be equal to trailing if non-zero + + forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility + + state State +} + +var chars = make([]Char, MaxChar+1) +var cccMap = make(map[uint8]uint8) + +func (c Char) String() string { + buf := new(bytes.Buffer) + + fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) + fmt.Fprintf(buf, " ccc: %v\n", c.ccc) + fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) + fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) + fmt.Fprintf(buf, " state: %v\n", c.state) + fmt.Fprintf(buf, " NFC:\n") + fmt.Fprint(buf, c.forms[FCanonical]) + fmt.Fprintf(buf, " NFKC:\n") + fmt.Fprint(buf, c.forms[FCompatibility]) + + return buf.String() +} + +// In UnicodeData.txt, some ranges are marked like this: +// 3400;;Lo;0;L;;;;;N;;;;; +// 4DB5;;Lo;0;L;;;;;N;;;;; +// parseCharacter keeps a state variable indicating the weirdness. +type State int + +const ( + SNormal State = iota // known to be zero for the type + SFirst + SLast + SMissing +) + +var lastChar = rune('\u0000') + +func (c Char) isValid() bool { + return c.codePoint != 0 && c.state != SMissing +} + +type FormInfo struct { + quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed + verified [MNumberOfModes]bool // index: MComposed or MDecomposed + + combinesForward bool // May combine with rune on the right + combinesBackward bool // May combine with rune on the left + isOneWay bool // Never appears in result + inDecomp bool // Some decompositions result in this char. 
+ decomp Decomposition + expandedDecomp Decomposition +} + +func (f FormInfo) String() string { + buf := bytes.NewBuffer(make([]byte, 0)) + + fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) + fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) + fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) + fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) + fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) + fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) + fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) + fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) + + return buf.String() +} + +type Decomposition []rune + +func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { + decomp := strings.Split(s, " ") + if len(decomp) > 0 && skipfirst { + decomp = decomp[1:] + } + for _, d := range decomp { + point, err := strconv.ParseUint(d, 16, 64) + if err != nil { + return a, err + } + a = append(a, rune(point)) + } + return a, nil +} + +func loadUnicodeData() { + f := gen.OpenUCDFile("UnicodeData.txt") + defer f.Close() + p := ucd.New(f) + for p.Next() { + r := p.Rune(ucd.CodePoint) + char := &chars[r] + + char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) + decmap := p.String(ucd.DecompMapping) + + exp, err := parseDecomposition(decmap, false) + isCompat := false + if err != nil { + if len(decmap) > 0 { + exp, err = parseDecomposition(decmap, true) + if err != nil { + log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) + } + isCompat = true + } + } + + char.name = p.String(ucd.Name) + char.codePoint = r + char.forms[FCompatibility].decomp = exp + if !isCompat { + char.forms[FCanonical].decomp = exp + } else { + char.compatDecomp = true + } + if len(decmap) > 0 { + char.forms[FCompatibility].decomp = exp + } + } + if err := p.Err(); err != nil { + log.Fatal(err) + } +} + +// compactCCC converts the sparse set of CCC values to a continguous one, +// reducing the number of bits needed from 8 to 6. 
+func compactCCC() { + m := make(map[uint8]uint8) + for i := range chars { + c := &chars[i] + m[c.ccc] = 0 + } + cccs := []int{} + for v, _ := range m { + cccs = append(cccs, int(v)) + } + sort.Ints(cccs) + for i, c := range cccs { + cccMap[uint8(i)] = uint8(c) + m[uint8(c)] = uint8(i) + } + for i := range chars { + c := &chars[i] + c.origCCC = c.ccc + c.ccc = m[c.ccc] + } + if len(m) >= 1<<6 { + log.Fatalf("too many difference CCC values: %d >= 64", len(m)) + } +} + +// CompositionExclusions.txt has form: +// 0958 # ... +// See https://unicode.org/reports/tr44/ for full explanation +func loadCompositionExclusions() { + f := gen.OpenUCDFile("CompositionExclusions.txt") + defer f.Close() + p := ucd.New(f) + for p.Next() { + c := &chars[p.Rune(0)] + if c.excludeInComp { + log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) + } + c.excludeInComp = true + } + if e := p.Err(); e != nil { + log.Fatal(e) + } +} + +// hasCompatDecomp returns true if any of the recursive +// decompositions contains a compatibility expansion. +// In this case, the character may not occur in NFK*. +func hasCompatDecomp(r rune) bool { + c := &chars[r] + if c.compatDecomp { + return true + } + for _, d := range c.forms[FCompatibility].decomp { + if hasCompatDecomp(d) { + return true + } + } + return false +} + +// Hangul related constants. +const ( + HangulBase = 0xAC00 + HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) + + JamoLBase = 0x1100 + JamoLEnd = 0x1113 + JamoVBase = 0x1161 + JamoVEnd = 0x1176 + JamoTBase = 0x11A8 + JamoTEnd = 0x11C3 + + JamoLVTCount = 19 * 21 * 28 + JamoTCount = 28 +) + +func isHangul(r rune) bool { + return HangulBase <= r && r < HangulEnd +} + +func isHangulWithoutJamoT(r rune) bool { + if !isHangul(r) { + return false + } + r -= HangulBase + return r < JamoLVTCount && r%JamoTCount == 0 +} + +func ccc(r rune) uint8 { + return chars[r].ccc +} + +// Insert a rune in a buffer, ordered by Canonical Combining Class. 
+func insertOrdered(b Decomposition, r rune) Decomposition { + n := len(b) + b = append(b, 0) + cc := ccc(r) + if cc > 0 { + // Use bubble sort. + for ; n > 0; n-- { + if ccc(b[n-1]) <= cc { + break + } + b[n] = b[n-1] + } + } + b[n] = r + return b +} + +// Recursively decompose. +func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { + dcomp := chars[r].forms[form].decomp + if len(dcomp) == 0 { + return insertOrdered(d, r) + } + for _, c := range dcomp { + d = decomposeRecursive(form, c, d) + } + return d +} + +func completeCharFields(form int) { + // Phase 0: pre-expand decomposition. + for i := range chars { + f := &chars[i].forms[form] + if len(f.decomp) == 0 { + continue + } + exp := make(Decomposition, 0) + for _, c := range f.decomp { + exp = decomposeRecursive(form, c, exp) + } + f.expandedDecomp = exp + } + + // Phase 1: composition exclusion, mark decomposition. + for i := range chars { + c := &chars[i] + f := &c.forms[form] + + // Marks script-specific exclusions and version restricted. + f.isOneWay = c.excludeInComp + + // Singletons + f.isOneWay = f.isOneWay || len(f.decomp) == 1 + + // Non-starter decompositions + if len(f.decomp) > 1 { + chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 + f.isOneWay = f.isOneWay || chk + } + + // Runes that decompose into more than two runes. + f.isOneWay = f.isOneWay || len(f.decomp) > 2 + + if form == FCompatibility { + f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) + } + + for _, r := range f.decomp { + chars[r].forms[form].inDecomp = true + } + } + + // Phase 2: forward and backward combining. 
+ for i := range chars { + c := &chars[i] + f := &c.forms[form] + + if !f.isOneWay && len(f.decomp) == 2 { + f0 := &chars[f.decomp[0]].forms[form] + f1 := &chars[f.decomp[1]].forms[form] + if !f0.isOneWay { + f0.combinesForward = true + } + if !f1.isOneWay { + f1.combinesBackward = true + } + } + if isHangulWithoutJamoT(rune(i)) { + f.combinesForward = true + } + } + + // Phase 3: quick check values. + for i := range chars { + c := &chars[i] + f := &c.forms[form] + + switch { + case len(f.decomp) > 0: + f.quickCheck[MDecomposed] = QCNo + case isHangul(rune(i)): + f.quickCheck[MDecomposed] = QCNo + default: + f.quickCheck[MDecomposed] = QCYes + } + switch { + case f.isOneWay: + f.quickCheck[MComposed] = QCNo + case (i & 0xffff00) == JamoLBase: + f.quickCheck[MComposed] = QCYes + if JamoLBase <= i && i < JamoLEnd { + f.combinesForward = true + } + if JamoVBase <= i && i < JamoVEnd { + f.quickCheck[MComposed] = QCMaybe + f.combinesBackward = true + f.combinesForward = true + } + if JamoTBase <= i && i < JamoTEnd { + f.quickCheck[MComposed] = QCMaybe + f.combinesBackward = true + } + case !f.combinesBackward: + f.quickCheck[MComposed] = QCYes + default: + f.quickCheck[MComposed] = QCMaybe + } + } +} + +func computeNonStarterCounts() { + // Phase 4: leading and trailing non-starter count + for i := range chars { + c := &chars[i] + + runes := []rune{rune(i)} + // We always use FCompatibility so that the CGJ insertion points do not + // change for repeated normalizations with different forms. + if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { + runes = exp + } + // We consider runes that combine backwards to be non-starters for the + // purpose of Stream-Safe Text Processing. 
+ for _, r := range runes { + if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { + break + } + c.nLeadingNonStarters++ + } + for i := len(runes) - 1; i >= 0; i-- { + if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { + break + } + c.nTrailingNonStarters++ + } + if c.nTrailingNonStarters > 3 { + log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) + } + + if isHangul(rune(i)) { + c.nTrailingNonStarters = 2 + if isHangulWithoutJamoT(rune(i)) { + c.nTrailingNonStarters = 1 + } + } + + if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { + log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) + } + if t := c.nTrailingNonStarters; t > 3 { + log.Fatalf("%U: number of trailing non-starters is %d > 3", t) + } + } +} + +func printBytes(w io.Writer, b []byte, name string) { + fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) + fmt.Fprintf(w, "var %s = [...]byte {", name) + for i, c := range b { + switch { + case i%64 == 0: + fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) + case i%8 == 0: + fmt.Fprintf(w, "\n") + } + fmt.Fprintf(w, "0x%.2X, ", c) + } + fmt.Fprint(w, "\n}\n\n") +} + +// See forminfo.go for format. +func makeEntry(f *FormInfo, c *Char) uint16 { + e := uint16(0) + if r := c.codePoint; HangulBase <= r && r < HangulEnd { + e |= 0x40 + } + if f.combinesForward { + e |= 0x20 + } + if f.quickCheck[MDecomposed] == QCNo { + e |= 0x4 + } + switch f.quickCheck[MComposed] { + case QCYes: + case QCNo: + e |= 0x10 + case QCMaybe: + e |= 0x18 + default: + log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) + } + e |= uint16(c.nTrailingNonStarters) + return e +} + +// decompSet keeps track of unique decompositions, grouped by whether +// the decomposition is followed by a trailing and/or leading CCC. 
+type decompSet [7]map[string]bool + +const ( + normalDecomp = iota + firstMulti + firstCCC + endMulti + firstLeadingCCC + firstCCCZeroExcept + firstStarterWithNLead + lastDecomp +) + +var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} + +func makeDecompSet() decompSet { + m := decompSet{} + for i := range m { + m[i] = make(map[string]bool) + } + return m +} +func (m *decompSet) insert(key int, s string) { + m[key][s] = true +} + +func printCharInfoTables(w io.Writer) int { + mkstr := func(r rune, f *FormInfo) (int, string) { + d := f.expandedDecomp + s := string([]rune(d)) + if max := 1 << 6; len(s) >= max { + const msg = "%U: too many bytes in decomposition: %d >= %d" + log.Fatalf(msg, r, len(s), max) + } + head := uint8(len(s)) + if f.quickCheck[MComposed] != QCYes { + head |= 0x40 + } + if f.combinesForward { + head |= 0x80 + } + s = string([]byte{head}) + s + + lccc := ccc(d[0]) + tccc := ccc(d[len(d)-1]) + cc := ccc(r) + if cc != 0 && lccc == 0 && tccc == 0 { + log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) + } + if tccc < lccc && lccc != 0 { + const msg = "%U: lccc (%d) must be <= tcc (%d)" + log.Fatalf(msg, r, lccc, tccc) + } + index := normalDecomp + nTrail := chars[r].nTrailingNonStarters + nLead := chars[r].nLeadingNonStarters + if tccc > 0 || lccc > 0 || nTrail > 0 { + tccc <<= 2 + tccc |= nTrail + s += string([]byte{tccc}) + index = endMulti + for _, r := range d[1:] { + if ccc(r) == 0 { + index = firstCCC + } + } + if lccc > 0 || nLead > 0 { + s += string([]byte{lccc}) + if index == firstCCC { + log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) + } + index = firstLeadingCCC + } + if cc != lccc { + if cc != 0 { + log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) + } + index = firstCCCZeroExcept + } + } else if len(d) > 1 { + index = firstMulti + } + return index, 
s + } + + decompSet := makeDecompSet() + const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. + decompSet.insert(firstStarterWithNLead, nLeadStr) + + // Store the uniqued decompositions in a byte buffer, + // preceded by their byte length. + for _, c := range chars { + for _, f := range c.forms { + if len(f.expandedDecomp) == 0 { + continue + } + if f.combinesBackward { + log.Fatalf("%U: combinesBackward and decompose", c.codePoint) + } + index, s := mkstr(c.codePoint, &f) + decompSet.insert(index, s) + } + } + + decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) + size := 0 + positionMap := make(map[string]uint16) + decompositions.WriteString("\000") + fmt.Fprintln(w, "const (") + for i, m := range decompSet { + sa := []string{} + for s := range m { + sa = append(sa, s) + } + sort.Strings(sa) + for _, s := range sa { + p := decompositions.Len() + decompositions.WriteString(s) + positionMap[s] = uint16(p) + } + if cname[i] != "" { + fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) + } + } + fmt.Fprintln(w, "maxDecomp = 0x8000") + fmt.Fprintln(w, ")") + b := decompositions.Bytes() + printBytes(w, b, "decomps") + size += len(b) + + varnames := []string{"nfc", "nfkc"} + for i := 0; i < FNumberOfFormTypes; i++ { + trie := triegen.NewTrie(varnames[i]) + + for r, c := range chars { + f := c.forms[i] + d := f.expandedDecomp + if len(d) != 0 { + _, key := mkstr(c.codePoint, &f) + trie.Insert(rune(r), uint64(positionMap[key])) + if c.ccc != ccc(d[0]) { + // We assume the lead ccc of a decomposition !=0 in this case. + if ccc(d[0]) == 0 { + log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) + } + } + } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { + // Handle cases where it can't be detected that the nLead should be equal + // to nTrail. 
+ trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) + } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { + trie.Insert(c.codePoint, uint64(0x8000|v)) + } + } + sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) + if err != nil { + log.Fatal(err) + } + size += sz + } + return size +} + +func contains(sa []string, s string) bool { + for _, a := range sa { + if a == s { + return true + } + } + return false +} + +func makeTables() { + w := &bytes.Buffer{} + + size := 0 + if *tablelist == "" { + return + } + list := strings.Split(*tablelist, ",") + if *tablelist == "all" { + list = []string{"recomp", "info"} + } + + // Compute maximum decomposition size. + max := 0 + for _, c := range chars { + if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { + max = n + } + } + fmt.Fprintln(w, `import "sync"`) + fmt.Fprintln(w) + + fmt.Fprintln(w, "const (") + fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") + fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) + fmt.Fprintln(w) + fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") + fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") + fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") + fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") + fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) + fmt.Fprintln(w, ")\n") + + // Print the CCC remap table. 
+ size += len(cccMap) + fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) + for i := 0; i < len(cccMap); i++ { + if i%8 == 0 { + fmt.Fprintln(w) + } + fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) + } + fmt.Fprintln(w, "\n}\n") + + if contains(list, "info") { + size += printCharInfoTables(w) + } + + if contains(list, "recomp") { + // Note that we use 32 bit keys, instead of 64 bit. + // This clips the bits of three entries, but we know + // this won't cause a collision. The compiler will catch + // any changes made to UnicodeData.txt that introduces + // a collision. + // Note that the recomposition map for NFC and NFKC + // are identical. + + // Recomposition map + nrentries := 0 + for _, c := range chars { + f := c.forms[FCanonical] + if !f.isOneWay && len(f.decomp) > 0 { + nrentries++ + } + } + sz := nrentries * 8 + size += sz + fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) + fmt.Fprintln(w, "var recompMap map[uint32]rune") + fmt.Fprintln(w, "var recompMapOnce sync.Once\n") + fmt.Fprintln(w, `const recompMapPacked = "" +`) + var buf [8]byte + for i, c := range chars { + f := c.forms[FCanonical] + d := f.decomp + if !f.isOneWay && len(d) > 0 { + key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) + binary.BigEndian.PutUint32(buf[:4], key) + binary.BigEndian.PutUint32(buf[4:], uint32(i)) + fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) + } + } + // hack so we don't have to special case the trailing plus sign + fmt.Fprintf(w, ` ""`) + fmt.Fprintln(w) + } + + fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) + gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) +} + +func printChars() { + if *verbose { + for _, c := range chars { + if !c.isValid() || c.state == SMissing { + continue + } + fmt.Println(c) + } + } +} + +// verifyComputed does various consistency tests. 
+func verifyComputed() { + for i, c := range chars { + for _, f := range c.forms { + isNo := (f.quickCheck[MDecomposed] == QCNo) + if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { + log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) + } + + isMaybe := f.quickCheck[MComposed] == QCMaybe + if f.combinesBackward != isMaybe { + log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) + } + if len(f.decomp) > 0 && f.combinesForward && isMaybe { + log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) + } + + if len(f.expandedDecomp) != 0 { + continue + } + if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { + // We accept these runes to be treated differently (it only affects + // segment breaking in iteration, most likely on improper use), but + // reconsider if more characters are added. + // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; + // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; + // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; + // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; + // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; + // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; + if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { + log.Fatalf("%U: nLead was %v; want %v", i, a, b) + } + } + } + nfc := c.forms[FCanonical] + nfkc := c.forms[FCompatibility] + if nfc.combinesBackward != nfkc.combinesBackward { + log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) + } + } +} + +// Use values in DerivedNormalizationProps.txt to compare against the +// values we computed. +// DerivedNormalizationProps.txt has form: +// 00C0..00C5 ; NFD_QC; N # ... +// 0374 ; NFD_QC; N # ... 
+// See https://unicode.org/reports/tr44/ for full explanation +func testDerived() { + f := gen.OpenUCDFile("DerivedNormalizationProps.txt") + defer f.Close() + p := ucd.New(f) + for p.Next() { + r := p.Rune(0) + c := &chars[r] + + var ftype, mode int + qt := p.String(1) + switch qt { + case "NFC_QC": + ftype, mode = FCanonical, MComposed + case "NFD_QC": + ftype, mode = FCanonical, MDecomposed + case "NFKC_QC": + ftype, mode = FCompatibility, MComposed + case "NFKD_QC": + ftype, mode = FCompatibility, MDecomposed + default: + continue + } + var qr QCResult + switch p.String(2) { + case "Y": + qr = QCYes + case "N": + qr = QCNo + case "M": + qr = QCMaybe + default: + log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) + } + if got := c.forms[ftype].quickCheck[mode]; got != qr { + log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) + } + c.forms[ftype].verified[mode] = true + } + if err := p.Err(); err != nil { + log.Fatal(err) + } + // Any unspecified value must be QCYes. Verify this. 
+ for i, c := range chars { + for j, fd := range c.forms { + for k, qr := range fd.quickCheck { + if !fd.verified[k] && qr != QCYes { + m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" + log.Printf(m, i, j, k, qr, c.name) + } + } + } + } +} + +var testHeader = `const ( + Yes = iota + No + Maybe +) + +type formData struct { + qc uint8 + combinesForward bool + decomposition string +} + +type runeData struct { + r rune + ccc uint8 + nLead uint8 + nTrail uint8 + f [2]formData // 0: canonical; 1: compatibility +} + +func f(qc uint8, cf bool, dec string) [2]formData { + return [2]formData{{qc, cf, dec}, {qc, cf, dec}} +} + +func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { + return [2]formData{{qc, cf, d}, {qck, cfk, dk}} +} + +var testData = []runeData{ +` + +func printTestdata() { + type lastInfo struct { + ccc uint8 + nLead uint8 + nTrail uint8 + f string + } + + last := lastInfo{} + w := &bytes.Buffer{} + fmt.Fprintf(w, testHeader) + for r, c := range chars { + f := c.forms[FCanonical] + qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) + f = c.forms[FCompatibility] + qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) + s := "" + if d == dk && qc == qck && cf == cfk { + s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) + } else { + s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) + } + current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} + if last != current { + fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) + last = current + } + } + fmt.Fprintln(w, "}") + gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go new file mode 100644 index 0000000000..45d711900d --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/triegen.go @@ -0,0 +1,117 @@ +// Copyright 2011 The 
Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Trie table generator. +// Used by make*tables tools to generate a go file with trie data structures +// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte +// sequence are used to lookup offsets in the index table to be used for the +// next byte. The last byte is used to index into a table with 16-bit values. + +package main + +import ( + "fmt" + "io" +) + +const maxSparseEntries = 16 + +type normCompacter struct { + sparseBlocks [][]uint64 + sparseOffset []uint16 + sparseCount int + name string +} + +func mostFrequentStride(a []uint64) int { + counts := make(map[int]int) + var v int + for _, x := range a { + if stride := int(x) - v; v != 0 && stride >= 0 { + counts[stride]++ + } + v = int(x) + } + var maxs, maxc int + for stride, cnt := range counts { + if cnt > maxc || (cnt == maxc && stride < maxs) { + maxs, maxc = stride, cnt + } + } + return maxs +} + +func countSparseEntries(a []uint64) int { + stride := mostFrequentStride(a) + var v, count int + for _, tv := range a { + if int(tv)-v != stride { + if tv != 0 { + count++ + } + } + v = int(tv) + } + return count +} + +func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { + if n := countSparseEntries(v); n <= maxSparseEntries { + return (n+1)*4 + 2, true + } + return 0, false +} + +func (c *normCompacter) Store(v []uint64) uint32 { + h := uint32(len(c.sparseOffset)) + c.sparseBlocks = append(c.sparseBlocks, v) + c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) + c.sparseCount += countSparseEntries(v) + 1 + return h +} + +func (c *normCompacter) Handler() string { + return c.name + "Sparse.lookup" +} + +func (c *normCompacter) Print(w io.Writer) (retErr error) { + p := func(f string, x ...interface{}) { + if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { + retErr = err + } + } + + ls := 
len(c.sparseBlocks) + p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) + p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) + + ns := c.sparseCount + p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) + p("var %sSparseValues = [%d]valueRange {", c.name, ns) + for i, b := range c.sparseBlocks { + p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) + var v int + stride := mostFrequentStride(b) + n := countSparseEntries(b) + p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) + for i, nv := range b { + if int(nv)-v != stride { + if v != 0 { + p(",hi:%#02x},", 0x80+i-1) + } + if nv != 0 { + p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) + } + } + v = int(nv) + } + if v != 0 { + p(",hi:%#02x},", 0x80+len(b)-1) + } + } + p("\n}\n\n") + return +} diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go new file mode 100644 index 0000000000..092277e1f6 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates the trie for width operations. The generated table +// includes width category information as well as the normalization mappings. +package main + +import ( + "bytes" + "fmt" + "io" + "log" + "math" + "unicode/utf8" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" +) + +// See gen_common.go for flags. + +func main() { + gen.Init() + genTables() + genTests() + gen.Repackage("gen_trieval.go", "trieval.go", "width") + gen.Repackage("gen_common.go", "common_test.go", "width") +} + +func genTables() { + t := triegen.NewTrie("width") + // fold and inverse mappings. See mapComment for a description of the format + // of each entry. Add dummy value to make an index of 0 mean no mapping. 
+ inverse := [][4]byte{{}} + mapping := map[[4]byte]int{[4]byte{}: 0} + + getWidthData(func(r rune, tag elem, alt rune) { + idx := 0 + if alt != 0 { + var buf [4]byte + buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) + s := string(r) + buf[buf[0]] ^= s[len(s)-1] + var ok bool + if idx, ok = mapping[buf]; !ok { + idx = len(mapping) + if idx > math.MaxUint8 { + log.Fatalf("Index %d does not fit in a byte.", idx) + } + mapping[buf] = idx + inverse = append(inverse, buf) + } + } + t.Insert(r, uint64(tag|elem(idx))) + }) + + w := &bytes.Buffer{} + gen.WriteUnicodeVersion(w) + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + + sz += writeMappings(w, inverse) + + fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) + + gen.WriteVersionedGoFile(*outputFile, "width", w.Bytes()) +} + +const inverseDataComment = ` +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. 
+// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8.` + +func writeMappings(w io.Writer, data [][4]byte) int { + fmt.Fprintln(w, inverseDataComment) + fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) + for _, x := range data { + fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) + } + fmt.Fprintln(w, "}") + return len(data) * 4 +} + +func genTests() { + w := &bytes.Buffer{} + fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") + getWidthData(func(r rune, tag elem, alt rune) { + if alt != 0 { + fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) + } + }) + fmt.Fprintln(w, "}") + gen.WriteGoFile("runes_test.go", "width", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go new file mode 100644 index 0000000000..601e752684 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_common.go @@ -0,0 +1,96 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This code is shared between the main code generator and the test code. + +import ( + "flag" + "log" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" +) + +var ( + outputFile = flag.String("out", "tables.go", "output file") +) + +var typeMap = map[string]elem{ + "A": tagAmbiguous, + "N": tagNeutral, + "Na": tagNarrow, + "W": tagWide, + "F": tagFullwidth, + "H": tagHalfwidth, +} + +// getWidthData calls f for every entry for which it is defined. +// +// f may be called multiple times for the same rune. The last call to f is the +// correct value. f is not called for all runes. The default tag type is +// Neutral. +func getWidthData(f func(r rune, tag elem, alt rune)) { + // Set the default values for Unified Ideographs. 
In line with Annex 11, + // we encode full ranges instead of the defined runes in Unified_Ideograph. + for _, b := range []struct{ lo, hi rune }{ + {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, + {0x3400, 0x4DBF}, // the CJK Unified Ideographs Externsion A block, + {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, + {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, + {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, + } { + for r := b.lo; r <= b.hi; r++ { + f(r, tagWide, 0) + } + } + + inverse := map[rune]rune{} + maps := map[string]bool{ + "": true, + "": true, + } + + // We cannot reuse package norm's decomposition, as we need an unexpanded + // decomposition. We make use of the opportunity to verify that the + // decomposition type is as expected. + ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) + if !maps[s[0]] { + return + } + x, err := strconv.ParseUint(s[1], 16, 32) + if err != nil { + log.Fatalf("Error parsing rune %q", s[1]) + } + if inverse[r] != 0 || inverse[rune(x)] != 0 { + log.Fatalf("Circular dependency in mapping between %U and %U", r, x) + } + inverse[r] = rune(x) + inverse[rune(x)] = r + }) + + // ; + ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { + tag, ok := typeMap[p.String(1)] + if !ok { + log.Fatalf("Unknown width type %q", p.String(1)) + } + r := p.Rune(0) + alt, ok := inverse[r] + if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { + tag |= tagNeedsFold + if !ok { + log.Fatalf("Narrow or wide rune %U has no decomposition", r) + } + } + f(r, tag, alt) + }) +} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go new file mode 100644 index 0000000000..c17334aa61 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_trieval.go @@ -0,0 +1,34 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go new file mode 100644 index 0000000000..2713dce64a --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/main.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// The gcexportdata command is a diagnostic tool that displays the +// contents of gc export data files. 
+package main + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" + "os" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/types/typeutil" +) + +var packageFlag = flag.String("package", "", "alternative package to print") + +func main() { + log.SetPrefix("gcexportdata: ") + log.SetFlags(0) + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") + } + flag.Parse() + if flag.NArg() != 1 { + flag.Usage() + os.Exit(2) + } + filename := flag.Args()[0] + + f, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Decode the package. + const primary = "" + imports := make(map[string]*types.Package) + fset := token.NewFileSet() + pkg, err := gcexportdata.Read(r, fset, imports, primary) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Optionally select an indirectly mentioned package. + if *packageFlag != "" { + pkg = imports[*packageFlag] + if pkg == nil { + fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", + filename, *packageFlag) + for p := range imports { + if p != primary { + fmt.Fprintf(os.Stderr, "\t%s\n", p) + } + } + os.Exit(1) + } + } + + // Print all package-level declarations, including non-exported ones. + fmt.Printf("package %s\n", pkg.Name()) + for _, imp := range pkg.Imports() { + fmt.Printf("import %q\n", imp.Path()) + } + qual := func(p *types.Package) string { + if pkg == p { + return "" + } + return p.Name() + } + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + fmt.Printf("%s: %s\n", + fset.Position(obj.Pos()), + types.ObjectString(obj, qual)) + + // For types, print each method. 
+ if _, ok := obj.(*types.TypeName); ok { + for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { + fmt.Printf("%s: %s\n", + fset.Position(method.Obj().Pos()), + types.SelectionString(method, qual)) + } + } + } +} diff --git a/vendor/golang.org/x/tools/internal/imports/mkindex.go b/vendor/golang.org/x/tools/internal/imports/mkindex.go new file mode 100644 index 0000000000..ef8c0d2871 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mkindex.go @@ -0,0 +1,173 @@ +// +build ignore + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command mkindex creates the file "pkgindex.go" containing an index of the Go +// standard library. The file is intended to be built as part of the imports +// package, so that the package may be used in environments where a GOROOT is +// not available (such as App Engine). +package imports + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/format" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +var ( + pkgIndex = make(map[string][]pkg) + exports = make(map[string]map[string]bool) +) + +func main() { + // Don't use GOPATH. + ctx := build.Default + ctx.GOPATH = "" + + // Populate pkgIndex global from GOROOT. + for _, path := range ctx.SrcDirs() { + f, err := os.Open(path) + if err != nil { + log.Print(err) + continue + } + children, err := f.Readdir(-1) + f.Close() + if err != nil { + log.Print(err) + continue + } + for _, child := range children { + if child.IsDir() { + loadPkg(path, child.Name()) + } + } + } + // Populate exports global. + for _, ps := range pkgIndex { + for _, p := range ps { + e := loadExports(p.dir) + if e != nil { + exports[p.dir] = e + } + } + } + + // Construct source file. 
+ var buf bytes.Buffer + fmt.Fprint(&buf, pkgIndexHead) + fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex) + fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports) + src := buf.Bytes() + + // Replace main.pkg type name with pkg. + src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1) + // Replace actual GOROOT with "/go". + src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1) + // Add some line wrapping. + src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1) + src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1) + + var err error + src, err = format.Source(src) + if err != nil { + log.Fatal(err) + } + + // Write out source file. + err = ioutil.WriteFile("pkgindex.go", src, 0644) + if err != nil { + log.Fatal(err) + } +} + +const pkgIndexHead = `package imports + +func init() { + pkgIndexOnce.Do(func() { + pkgIndex.m = pkgIndexMaster + }) + loadExports = func(dir string) map[string]bool { + return exportsMaster[dir] + } +} +` + +type pkg struct { + importpath string // full pkg import path, e.g. "net/http" + dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt" +} + +var fset = token.NewFileSet() + +func loadPkg(root, importpath string) { + shortName := path.Base(importpath) + if shortName == "testdata" { + return + } + + dir := filepath.Join(root, importpath) + pkgIndex[shortName] = append(pkgIndex[shortName], pkg{ + importpath: importpath, + dir: dir, + }) + + pkgDir, err := os.Open(dir) + if err != nil { + return + } + children, err := pkgDir.Readdir(-1) + pkgDir.Close() + if err != nil { + return + } + for _, child := range children { + name := child.Name() + if name == "" { + continue + } + if c := name[0]; c == '.' 
|| ('0' <= c && c <= '9') { + continue + } + if child.IsDir() { + loadPkg(root, filepath.Join(importpath, name)) + } + } +} + +func loadExports(dir string) map[string]bool { + exports := make(map[string]bool) + buildPkg, err := build.ImportDir(dir, 0) + if err != nil { + if strings.Contains(err.Error(), "no buildable Go source files in") { + return nil + } + log.Printf("could not import %q: %v", dir, err) + return nil + } + for _, file := range buildPkg.GoFiles { + f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0) + if err != nil { + log.Printf("could not parse %q: %v", file, err) + continue + } + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports[name] = true + } + } + } + return exports +} diff --git a/vendor/golang.org/x/tools/internal/imports/mkstdlib.go b/vendor/golang.org/x/tools/internal/imports/mkstdlib.go new file mode 100644 index 0000000000..39b86ccd90 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mkstdlib.go @@ -0,0 +1,128 @@ +// +build ignore + +// mkstdlib generates the zstdlib.go file, containing the Go standard +// library API symbols. It's baked into the binary to avoid scanning +// GOPATH in the common case. +package main + +import ( + "bufio" + "bytes" + "fmt" + "go/format" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "sort" +) + +func mustOpen(name string) io.Reader { + f, err := os.Open(name) + if err != nil { + log.Fatal(err) + } + return f +} + +func api(base string) string { + return filepath.Join(runtime.GOROOT(), "api", base) +} + +var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) + +var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true} + +func main() { + var buf bytes.Buffer + outf := func(format string, args ...interface{}) { + fmt.Fprintf(&buf, format, args...) + } + outf("// Code generated by mkstdlib.go. 
DO NOT EDIT.\n\n") + outf("package imports\n") + outf("var stdlib = map[string][]string{\n") + f := io.MultiReader( + mustOpen(api("go1.txt")), + mustOpen(api("go1.1.txt")), + mustOpen(api("go1.2.txt")), + mustOpen(api("go1.3.txt")), + mustOpen(api("go1.4.txt")), + mustOpen(api("go1.5.txt")), + mustOpen(api("go1.6.txt")), + mustOpen(api("go1.7.txt")), + mustOpen(api("go1.8.txt")), + mustOpen(api("go1.9.txt")), + mustOpen(api("go1.10.txt")), + mustOpen(api("go1.11.txt")), + mustOpen(api("go1.12.txt")), + mustOpen(api("go1.13.txt")), + + // The API of the syscall/js package needs to be computed explicitly, + // because it's not included in the GOROOT/api/go1.*.txt files at this time. + syscallJSAPI(), + ) + sc := bufio.NewScanner(f) + + pkgs := map[string]map[string]bool{ + "unsafe": unsafeSyms, + } + paths := []string{"unsafe"} + + for sc.Scan() { + l := sc.Text() + if m := sym.FindStringSubmatch(l); m != nil { + path, sym := m[1], m[2] + + if _, ok := pkgs[path]; !ok { + pkgs[path] = map[string]bool{} + paths = append(paths, path) + } + pkgs[path][sym] = true + } + } + if err := sc.Err(); err != nil { + log.Fatal(err) + } + sort.Strings(paths) + for _, path := range paths { + outf("\t%q: []string{\n", path) + pkg := pkgs[path] + var syms []string + for sym := range pkg { + syms = append(syms, sym) + } + sort.Strings(syms) + for _, sym := range syms { + outf("\t\t%q,\n", sym) + } + outf("},\n") + } + outf("}\n") + fmtbuf, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666) + if err != nil { + log.Fatal(err) + } +} + +// syscallJSAPI returns the API of the syscall/js package. +// It's computed from the contents of $(go env GOROOT)/src/syscall/js. 
+func syscallJSAPI() io.Reader { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js") + out, err := cmd.Output() + if err != nil { + log.Fatalln(err) + } + return bytes.NewReader(out) +} diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go new file mode 100644 index 0000000000..168cdb8578 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -0,0 +1,308 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bufconn provides a net.Conn implemented by a buffer and related +// dialing and listening functionality. +package bufconn + +import ( + "fmt" + "io" + "net" + "sync" + "time" +) + +// Listener implements a net.Listener that creates local, buffered net.Conns +// via its Accept and Dial method. 
+type Listener struct { + mu sync.Mutex + sz int + ch chan net.Conn + done chan struct{} +} + +// Implementation of net.Error providing timeout +type netErrorTimeout struct { + error +} + +func (e netErrorTimeout) Timeout() bool { return true } +func (e netErrorTimeout) Temporary() bool { return false } + +var errClosed = fmt.Errorf("closed") +var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} + +// Listen returns a Listener that can only be contacted by its own Dialers and +// creates buffered connections between the two. +func Listen(sz int) *Listener { + return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} +} + +// Accept blocks until Dial is called, then returns a net.Conn for the server +// half of the connection. +func (l *Listener) Accept() (net.Conn, error) { + select { + case <-l.done: + return nil, errClosed + case c := <-l.ch: + return c, nil + } +} + +// Close stops the listener. +func (l *Listener) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + select { + case <-l.done: + // Already closed. + break + default: + close(l.done) + } + return nil +} + +// Addr reports the address of the listener. +func (l *Listener) Addr() net.Addr { return addr{} } + +// Dial creates an in-memory full-duplex network connection, unblocks Accept by +// providing it the server half of the connection, and returns the client half +// of the connection. +func (l *Listener) Dial() (net.Conn, error) { + p1, p2 := newPipe(l.sz), newPipe(l.sz) + select { + case <-l.done: + return nil, errClosed + case l.ch <- &conn{p1, p2}: + return &conn{p2, p1}, nil + } +} + +type pipe struct { + mu sync.Mutex + + // buf contains the data in the pipe. It is a ring buffer of fixed capacity, + // with r and w pointing to the offset to read and write, respsectively. + // + // Data is read between [r, w) and written to [w, r), wrapping around the end + // of the slice if necessary. 
+ // + // The buffer is empty if r == len(buf), otherwise if r == w, it is full. + // + // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. + buf []byte + w, r int + + wwait sync.Cond + rwait sync.Cond + + // Indicate that a write/read timeout has occurred + wtimedout bool + rtimedout bool + + wtimer *time.Timer + rtimer *time.Timer + + closed bool + writeClosed bool +} + +func newPipe(sz int) *pipe { + p := &pipe{buf: make([]byte, 0, sz)} + p.wwait.L = &p.mu + p.rwait.L = &p.mu + + p.wtimer = time.AfterFunc(0, func() {}) + p.rtimer = time.AfterFunc(0, func() {}) + return p +} + +func (p *pipe) empty() bool { + return p.r == len(p.buf) +} + +func (p *pipe) full() bool { + return p.r < len(p.buf) && p.r == p.w +} + +func (p *pipe) Read(b []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + // Block until p has data. + for { + if p.closed { + return 0, io.ErrClosedPipe + } + if !p.empty() { + break + } + if p.writeClosed { + return 0, io.EOF + } + if p.rtimedout { + return 0, errTimeout + } + + p.rwait.Wait() + } + wasFull := p.full() + + n = copy(b, p.buf[p.r:len(p.buf)]) + p.r += n + if p.r == cap(p.buf) { + p.r = 0 + p.buf = p.buf[:p.w] + } + + // Signal a blocked writer, if any + if wasFull { + p.wwait.Signal() + } + + return n, nil +} + +func (p *pipe) Write(b []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.closed { + return 0, io.ErrClosedPipe + } + for len(b) > 0 { + // Block until p is not full. + for { + if p.closed || p.writeClosed { + return 0, io.ErrClosedPipe + } + if !p.full() { + break + } + if p.wtimedout { + return 0, errTimeout + } + + p.wwait.Wait() + } + wasEmpty := p.empty() + + end := cap(p.buf) + if p.w < p.r { + end = p.r + } + x := copy(p.buf[p.w:end], b) + b = b[x:] + n += x + p.w += x + if p.w > len(p.buf) { + p.buf = p.buf[:p.w] + } + if p.w == cap(p.buf) { + p.w = 0 + } + + // Signal a blocked reader, if any. 
+ if wasEmpty { + p.rwait.Signal() + } + } + return n, nil +} + +func (p *pipe) Close() error { + p.mu.Lock() + defer p.mu.Unlock() + p.closed = true + // Signal all blocked readers and writers to return an error. + p.rwait.Broadcast() + p.wwait.Broadcast() + return nil +} + +func (p *pipe) closeWrite() error { + p.mu.Lock() + defer p.mu.Unlock() + p.writeClosed = true + // Signal all blocked readers and writers to return an error. + p.rwait.Broadcast() + p.wwait.Broadcast() + return nil +} + +type conn struct { + io.Reader + io.Writer +} + +func (c *conn) Close() error { + err1 := c.Reader.(*pipe).Close() + err2 := c.Writer.(*pipe).closeWrite() + if err1 != nil { + return err1 + } + return err2 +} + +func (c *conn) SetDeadline(t time.Time) error { + c.SetReadDeadline(t) + c.SetWriteDeadline(t) + return nil +} + +func (c *conn) SetReadDeadline(t time.Time) error { + p := c.Reader.(*pipe) + p.mu.Lock() + defer p.mu.Unlock() + p.rtimer.Stop() + p.rtimedout = false + if !t.IsZero() { + p.rtimer = time.AfterFunc(time.Until(t), func() { + p.mu.Lock() + defer p.mu.Unlock() + p.rtimedout = true + p.rwait.Broadcast() + }) + } + return nil +} + +func (c *conn) SetWriteDeadline(t time.Time) error { + p := c.Writer.(*pipe) + p.mu.Lock() + defer p.mu.Unlock() + p.wtimer.Stop() + p.wtimedout = false + if !t.IsZero() { + p.wtimer = time.AfterFunc(time.Until(t), func() { + p.mu.Lock() + defer p.mu.Unlock() + p.wtimedout = true + p.wwait.Broadcast() + }) + } + return nil +} + +func (*conn) LocalAddr() net.Addr { return addr{} } +func (*conn) RemoteAddr() net.Addr { return addr{} } + +type addr struct{} + +func (addr) Network() string { return "bufconn" } +func (addr) String() string { return "bufconn" } diff --git a/vendor/modules.txt b/vendor/modules.txt index 587b103f24..db71563d30 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,13 +1,13 @@ # cloud.google.com/go v0.49.0 cloud.google.com/go -cloud.google.com/go/compute/metadata cloud.google.com/go/iam 
-cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen +cloud.google.com/go/internal +cloud.google.com/go/compute/metadata # cloud.google.com/go/bigtable v1.1.0 cloud.google.com/go/bigtable cloud.google.com/go/bigtable/bttest @@ -61,38 +61,35 @@ github.com/asaskevich/govalidator github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/service/applicationautoscaling +github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface +github.com/aws/aws-sdk-go/service/dynamodb +github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/s3/s3iface +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/internal/sdkio github.com/aws/aws-sdk-go/aws/client/metadata +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/corehandlers -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds github.com/aws/aws-sdk-go/aws/credentials/stscreds -github.com/aws/aws-sdk-go/aws/crr github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/s3err 
-github.com/aws/aws-sdk-go/internal/sdkio -github.com/aws/aws-sdk-go/internal/sdkmath -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/private/protocol -github.com/aws/aws-sdk-go/private/protocol/ec2query +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/aws/crr +github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil -github.com/aws/aws-sdk-go/private/protocol/jsonrpc -github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil @@ -106,6 +103,16 @@ github.com/aws/aws-sdk-go/service/s3/internal/arn github.com/aws/aws-sdk-go/service/s3/s3iface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/service/ec2 +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/private/protocol/query/queryutil +github.com/aws/aws-sdk-go/private/protocol/ec2query # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.0+incompatible @@ -142,8 +149,8 @@ github.com/fluent/fluent-logger-golang/fluent github.com/fsouza/fake-gcs-server/fakestorage github.com/fsouza/fake-gcs-server/internal/backend # github.com/go-kit/kit v0.9.0 
-github.com/go-kit/kit/log github.com/go-kit/kit/log/level +github.com/go-kit/kit/log # github.com/go-logfmt/logfmt v0.4.0 github.com/go-logfmt/logfmt # github.com/go-openapi/analysis v0.19.4 @@ -158,14 +165,14 @@ github.com/go-openapi/jsonreference # github.com/go-openapi/loads v0.19.2 github.com/go-openapi/loads # github.com/go-openapi/runtime v0.19.3 +github.com/go-openapi/runtime/middleware github.com/go-openapi/runtime -github.com/go-openapi/runtime/flagext github.com/go-openapi/runtime/logger -github.com/go-openapi/runtime/middleware github.com/go-openapi/runtime/middleware/denco github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security +github.com/go-openapi/runtime/flagext # github.com/go-openapi/spec v0.19.2 github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.19.2 @@ -186,42 +193,42 @@ github.com/gogo/googleapis/google/rpc # github.com/gogo/protobuf v1.3.1 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto -github.com/gogo/protobuf/protoc-gen-gogo/descriptor -github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/types +github.com/gogo/protobuf/sortkeys +github.com/gogo/protobuf/protoc-gen-gogo/descriptor # github.com/gogo/status v1.0.3 github.com/gogo/status # github.com/golang-migrate/migrate/v4 v4.7.0 github.com/golang-migrate/migrate/v4 -github.com/golang-migrate/migrate/v4/database github.com/golang-migrate/migrate/v4/database/postgres +github.com/golang-migrate/migrate/v4/source/file +github.com/golang-migrate/migrate/v4/database github.com/golang-migrate/migrate/v4/internal/url github.com/golang-migrate/migrate/v4/source -github.com/golang-migrate/migrate/v4/source/file # github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 github.com/golang/groupcache/lru # github.com/golang/protobuf v1.3.2 -github.com/golang/protobuf/descriptor -github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto 
+github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/wrappers +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/protoc-gen-go github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/descriptor +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/protoc-gen-go/generator -github.com/golang/protobuf/protoc-gen-go/generator/internal/remap github.com/golang/protobuf/protoc-gen-go/grpc -github.com/golang/protobuf/protoc-gen-go/plugin -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/struct -github.com/golang/protobuf/ptypes/timestamp -github.com/golang/protobuf/ptypes/wrappers +github.com/golang/protobuf/protoc-gen-go/generator/internal/remap +github.com/golang/protobuf/protoc-gen-go/plugin # github.com/golang/snappy v0.0.1 github.com/golang/snappy # github.com/gomodule/redigo v2.0.0+incompatible -github.com/gomodule/redigo/internal github.com/gomodule/redigo/redis +github.com/gomodule/redigo/internal # github.com/google/btree v1.0.0 github.com/google/btree # github.com/google/go-cmp v0.3.1 @@ -245,29 +252,29 @@ github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors -github.com/gophercloud/gophercloud/openstack/compute/v2/flavors -github.com/gophercloud/gophercloud/openstack/compute/v2/images github.com/gophercloud/gophercloud/openstack/compute/v2/servers -github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/pagination github.com/gophercloud/gophercloud/openstack/identity/v2/tokens 
github.com/gophercloud/gophercloud/openstack/identity/v3/tokens github.com/gophercloud/gophercloud/openstack/utils -github.com/gophercloud/gophercloud/pagination +github.com/gophercloud/gophercloud/openstack/compute/v2/flavors +github.com/gophercloud/gophercloud/openstack/compute/v2/images +github.com/gophercloud/gophercloud/openstack/identity/v2/tenants # github.com/gorilla/mux v1.7.1 github.com/gorilla/mux # github.com/gorilla/websocket v1.4.0 github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 github.com/grpc-ecosystem/go-grpc-middleware -github.com/grpc-ecosystem/go-grpc-middleware/tags github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing +github.com/grpc-ecosystem/go-grpc-middleware/tags github.com/grpc-ecosystem/go-grpc-middleware/util/metautils # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus # github.com/grpc-ecosystem/grpc-gateway v1.12.1 -github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities +github.com/grpc-ecosystem/grpc-gateway/internal # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool # github.com/hashicorp/consul/api v1.3.0 @@ -287,8 +294,8 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-sockaddr # github.com/hashicorp/golang-lru v0.5.3 -github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru +github.com/hashicorp/golang-lru # github.com/hashicorp/memberlist v0.1.5 github.com/hashicorp/memberlist # github.com/hashicorp/serf v0.8.5 @@ -322,9 +329,9 @@ github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram # github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e -github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +github.com/mailru/easyjson/buffer # github.com/mattn/go-ieproxy 
v0.0.0-20191113090002-7c0f6868bffe github.com/mattn/go-ieproxy # github.com/matttproud/golang_protobuf_extensions v1.0.1 @@ -362,8 +369,8 @@ github.com/opentracing-contrib/go-grpc github.com/opentracing-contrib/go-stdlib/nethttp # github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 github.com/opentracing/opentracing-go -github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +github.com/opentracing/opentracing-go/ext # github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8 github.com/philhofer/fwd # github.com/pkg/errors v0.8.1 @@ -372,25 +379,11 @@ github.com/pkg/errors github.com/pmezard/go-difflib/difflib # github.com/prometheus/alertmanager v0.19.0 github.com/prometheus/alertmanager/api -github.com/prometheus/alertmanager/api/metrics -github.com/prometheus/alertmanager/api/v1 -github.com/prometheus/alertmanager/api/v2 -github.com/prometheus/alertmanager/api/v2/models -github.com/prometheus/alertmanager/api/v2/restapi -github.com/prometheus/alertmanager/api/v2/restapi/operations -github.com/prometheus/alertmanager/api/v2/restapi/operations/alert -github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup -github.com/prometheus/alertmanager/api/v2/restapi/operations/general -github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver -github.com/prometheus/alertmanager/api/v2/restapi/operations/silence -github.com/prometheus/alertmanager/asset github.com/prometheus/alertmanager/cluster -github.com/prometheus/alertmanager/cluster/clusterpb github.com/prometheus/alertmanager/config github.com/prometheus/alertmanager/dispatch github.com/prometheus/alertmanager/inhibit github.com/prometheus/alertmanager/nflog -github.com/prometheus/alertmanager/nflog/nflogpb github.com/prometheus/alertmanager/notify github.com/prometheus/alertmanager/notify/email github.com/prometheus/alertmanager/notify/hipchat @@ -401,33 +394,47 @@ github.com/prometheus/alertmanager/notify/slack 
github.com/prometheus/alertmanager/notify/victorops github.com/prometheus/alertmanager/notify/webhook github.com/prometheus/alertmanager/notify/wechat -github.com/prometheus/alertmanager/pkg/parse -github.com/prometheus/alertmanager/provider github.com/prometheus/alertmanager/provider/mem github.com/prometheus/alertmanager/silence -github.com/prometheus/alertmanager/silence/silencepb -github.com/prometheus/alertmanager/store github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui +github.com/prometheus/alertmanager/api/v1 +github.com/prometheus/alertmanager/api/v2 +github.com/prometheus/alertmanager/provider +github.com/prometheus/alertmanager/cluster/clusterpb +github.com/prometheus/alertmanager/store +github.com/prometheus/alertmanager/nflog/nflogpb +github.com/prometheus/alertmanager/silence/silencepb +github.com/prometheus/alertmanager/asset +github.com/prometheus/alertmanager/api/v2/models +github.com/prometheus/alertmanager/api/metrics +github.com/prometheus/alertmanager/pkg/parse +github.com/prometheus/alertmanager/api/v2/restapi +github.com/prometheus/alertmanager/api/v2/restapi/operations +github.com/prometheus/alertmanager/api/v2/restapi/operations/alert +github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup +github.com/prometheus/alertmanager/api/v2/restapi/operations/general +github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver +github.com/prometheus/alertmanager/api/v2/restapi/operations/silence # github.com/prometheus/client_golang v1.2.1 -github.com/prometheus/client_golang/api -github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promauto +github.com/prometheus/client_golang/api +github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus/promhttp 
-github.com/prometheus/client_golang/prometheus/push +github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/testutil +github.com/prometheus/client_golang/prometheus/push # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go # github.com/prometheus/common v0.7.0 +github.com/prometheus/common/version +github.com/prometheus/common/model +github.com/prometheus/common/route github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -github.com/prometheus/common/model -github.com/prometheus/common/route -github.com/prometheus/common/version # github.com/prometheus/procfs v0.0.6 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -452,40 +459,60 @@ github.com/prometheus/prometheus/discovery/zookeeper github.com/prometheus/prometheus/notifier github.com/prometheus/prometheus/pkg/exemplar github.com/prometheus/prometheus/pkg/gate +# github.com/prometheus/prometheus v1.8.2-0.20191126064551-80ba03c67da1 github.com/prometheus/prometheus/pkg/labels -github.com/prometheus/prometheus/pkg/logging -github.com/prometheus/prometheus/pkg/pool +github.com/prometheus/prometheus/promql +github.com/prometheus/prometheus/tsdb/chunkenc github.com/prometheus/prometheus/pkg/relabel +github.com/prometheus/prometheus/tsdb github.com/prometheus/prometheus/pkg/rulefmt +github.com/prometheus/prometheus/rules github.com/prometheus/prometheus/pkg/textparse github.com/prometheus/prometheus/pkg/timestamp github.com/prometheus/prometheus/pkg/value -github.com/prometheus/prometheus/prompb -github.com/prometheus/prometheus/promql -github.com/prometheus/prometheus/rules -github.com/prometheus/prometheus/scrape github.com/prometheus/prometheus/storage -github.com/prometheus/prometheus/storage/remote github.com/prometheus/prometheus/storage/tsdb -github.com/prometheus/prometheus/template 
-github.com/prometheus/prometheus/tsdb -github.com/prometheus/prometheus/tsdb/chunkenc -github.com/prometheus/prometheus/tsdb/chunks -github.com/prometheus/prometheus/tsdb/encoding +github.com/prometheus/prometheus/util/stats +github.com/prometheus/prometheus/util/strutil +github.com/prometheus/prometheus/util/testutil +github.com/prometheus/prometheus/config +github.com/prometheus/prometheus/web/api/v1 +github.com/prometheus/prometheus/pkg/gate github.com/prometheus/prometheus/tsdb/errors github.com/prometheus/prometheus/tsdb/fileutil +github.com/prometheus/prometheus/tsdb/wal +github.com/prometheus/prometheus/scrape +github.com/prometheus/prometheus/discovery +github.com/prometheus/prometheus/discovery/config +github.com/prometheus/prometheus/discovery/dns +github.com/prometheus/prometheus/discovery/targetgroup +github.com/prometheus/prometheus/notifier +github.com/prometheus/prometheus/util/teststorage +github.com/prometheus/prometheus/tsdb/chunks +github.com/prometheus/prometheus/tsdb/encoding github.com/prometheus/prometheus/tsdb/goversion github.com/prometheus/prometheus/tsdb/index github.com/prometheus/prometheus/tsdb/record github.com/prometheus/prometheus/tsdb/tombstones -github.com/prometheus/prometheus/tsdb/wal +github.com/prometheus/prometheus/template +github.com/prometheus/prometheus/pkg/exemplar +github.com/prometheus/prometheus/prompb +github.com/prometheus/prometheus/storage/remote github.com/prometheus/prometheus/util/httputil -github.com/prometheus/prometheus/util/stats -github.com/prometheus/prometheus/util/strutil -github.com/prometheus/prometheus/util/teststorage -github.com/prometheus/prometheus/util/testutil +github.com/prometheus/prometheus/pkg/pool +github.com/prometheus/prometheus/discovery/azure +github.com/prometheus/prometheus/discovery/consul +github.com/prometheus/prometheus/discovery/ec2 +github.com/prometheus/prometheus/discovery/file +github.com/prometheus/prometheus/discovery/gce 
+github.com/prometheus/prometheus/discovery/kubernetes +github.com/prometheus/prometheus/discovery/marathon +github.com/prometheus/prometheus/discovery/openstack +github.com/prometheus/prometheus/discovery/triton +github.com/prometheus/prometheus/discovery/zookeeper +github.com/prometheus/prometheus/discovery/refresh +github.com/prometheus/prometheus/pkg/logging github.com/prometheus/prometheus/util/treecache -github.com/prometheus/prometheus/web/api/v1 # github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 github.com/rafaeljusto/redigomock # github.com/rs/cors v1.6.0 @@ -518,8 +545,6 @@ github.com/spf13/pflag # github.com/stretchr/objx v0.2.0 github.com/stretchr/objx # github.com/stretchr/testify v1.4.0 -github.com/stretchr/testify/assert -github.com/stretchr/testify/mock github.com/stretchr/testify/require # github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d github.com/thanos-io/thanos/pkg/block @@ -541,10 +566,25 @@ github.com/thanos-io/thanos/pkg/objstore/gcs github.com/thanos-io/thanos/pkg/objstore/s3 github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/runutil +github.com/stretchr/testify/mock +github.com/stretchr/testify/assert +# github.com/thanos-io/thanos v0.8.1-0.20200102143048-a37ac093a67a +github.com/thanos-io/thanos/pkg/compact +github.com/thanos-io/thanos/pkg/compact/downsample +github.com/thanos-io/thanos/pkg/objstore +github.com/thanos-io/thanos/pkg/block/metadata github.com/thanos-io/thanos/pkg/shipper +github.com/thanos-io/thanos/pkg/model +github.com/thanos-io/thanos/pkg/runutil github.com/thanos-io/thanos/pkg/store github.com/thanos-io/thanos/pkg/store/cache github.com/thanos-io/thanos/pkg/store/storepb +github.com/thanos-io/thanos/pkg/objstore/gcs +github.com/thanos-io/thanos/pkg/objstore/s3 +github.com/thanos-io/thanos/pkg/block +github.com/thanos-io/thanos/pkg/component +github.com/thanos-io/thanos/pkg/extprom +github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/strutil 
github.com/thanos-io/thanos/pkg/tracing # github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d @@ -555,39 +595,41 @@ github.com/tmc/grpc-websocket-proxy/wsproxy github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config github.com/uber/jaeger-client-go/internal/baggage -github.com/uber/jaeger-client-go/internal/baggage/remote github.com/uber/jaeger-client-go/internal/spanlog github.com/uber/jaeger-client-go/internal/throttler -github.com/uber/jaeger-client-go/internal/throttler/remote github.com/uber/jaeger-client-go/log -github.com/uber/jaeger-client-go/rpcmetrics github.com/uber/jaeger-client-go/thrift -github.com/uber/jaeger-client-go/thrift-gen/agent -github.com/uber/jaeger-client-go/thrift-gen/baggage github.com/uber/jaeger-client-go/thrift-gen/jaeger github.com/uber/jaeger-client-go/thrift-gen/sampling github.com/uber/jaeger-client-go/thrift-gen/zipkincore -github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils +github.com/uber/jaeger-client-go/internal/baggage/remote +github.com/uber/jaeger-client-go/internal/throttler/remote +github.com/uber/jaeger-client-go/rpcmetrics +github.com/uber/jaeger-client-go/transport +github.com/uber/jaeger-client-go/thrift-gen/agent +github.com/uber/jaeger-client-go/thrift-gen/baggage # github.com/uber/jaeger-lib v2.2.0+incompatible -github.com/uber/jaeger-lib/metrics github.com/uber/jaeger-lib/metrics/prometheus +github.com/uber/jaeger-lib/metrics # github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 github.com/weaveworks/billing-client # github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 github.com/weaveworks/common/aws +# github.com/weaveworks/common v0.0.0-20190822150010-afb9996716e4 => github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 +github.com/weaveworks/common/tracing +github.com/weaveworks/common/server +github.com/weaveworks/common/user github.com/weaveworks/common/errors github.com/weaveworks/common/httpgrpc 
-github.com/weaveworks/common/httpgrpc/server github.com/weaveworks/common/instrument +github.com/weaveworks/common/mtime +github.com/weaveworks/common/aws github.com/weaveworks/common/logging +github.com/weaveworks/common/httpgrpc/server github.com/weaveworks/common/middleware -github.com/weaveworks/common/mtime -github.com/weaveworks/common/server github.com/weaveworks/common/signals github.com/weaveworks/common/test -github.com/weaveworks/common/tracing -github.com/weaveworks/common/user # github.com/weaveworks/promrus v1.2.0 github.com/weaveworks/promrus # github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 @@ -595,32 +637,23 @@ github.com/xiang90/probing # go.etcd.io/bbolt v1.3.3 go.etcd.io/bbolt # go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5 -go.etcd.io/etcd/auth -go.etcd.io/etcd/auth/authpb -go.etcd.io/etcd/client go.etcd.io/etcd/clientv3 -go.etcd.io/etcd/clientv3/balancer -go.etcd.io/etcd/clientv3/balancer/picker -go.etcd.io/etcd/clientv3/balancer/resolver/endpoint -go.etcd.io/etcd/clientv3/concurrency go.etcd.io/etcd/embed +go.etcd.io/etcd/etcdserver/api/v3client +go.etcd.io/etcd/auth/authpb +go.etcd.io/etcd/clientv3/balancer +go.etcd.io/etcd/clientv3/balancer/picker +go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes +go.etcd.io/etcd/etcdserver/etcdserverpb +go.etcd.io/etcd/mvcc/mvccpb +go.etcd.io/etcd/pkg/logutil +go.etcd.io/etcd/pkg/types go.etcd.io/etcd/etcdserver -go.etcd.io/etcd/etcdserver/api go.etcd.io/etcd/etcdserver/api/etcdhttp -go.etcd.io/etcd/etcdserver/api/membership go.etcd.io/etcd/etcdserver/api/rafthttp -go.etcd.io/etcd/etcdserver/api/snap -go.etcd.io/etcd/etcdserver/api/snap/snappb -go.etcd.io/etcd/etcdserver/api/v2auth -go.etcd.io/etcd/etcdserver/api/v2discovery -go.etcd.io/etcd/etcdserver/api/v2error go.etcd.io/etcd/etcdserver/api/v2http -go.etcd.io/etcd/etcdserver/api/v2http/httptypes -go.etcd.io/etcd/etcdserver/api/v2stats -go.etcd.io/etcd/etcdserver/api/v2store 
go.etcd.io/etcd/etcdserver/api/v2v3 -go.etcd.io/etcd/etcdserver/api/v3alarm -go.etcd.io/etcd/etcdserver/api/v3client go.etcd.io/etcd/etcdserver/api/v3compactor go.etcd.io/etcd/etcdserver/api/v3election go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb @@ -629,112 +662,121 @@ go.etcd.io/etcd/etcdserver/api/v3lock go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw go.etcd.io/etcd/etcdserver/api/v3rpc -go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes -go.etcd.io/etcd/etcdserver/etcdserverpb go.etcd.io/etcd/etcdserver/etcdserverpb/gw -go.etcd.io/etcd/lease -go.etcd.io/etcd/lease/leasehttp -go.etcd.io/etcd/lease/leasepb -go.etcd.io/etcd/mvcc -go.etcd.io/etcd/mvcc/backend -go.etcd.io/etcd/mvcc/mvccpb -go.etcd.io/etcd/pkg/adt -go.etcd.io/etcd/pkg/contention -go.etcd.io/etcd/pkg/cpuutil -go.etcd.io/etcd/pkg/crc go.etcd.io/etcd/pkg/debugutil -go.etcd.io/etcd/pkg/fileutil go.etcd.io/etcd/pkg/flags go.etcd.io/etcd/pkg/httputil -go.etcd.io/etcd/pkg/idutil -go.etcd.io/etcd/pkg/ioutil -go.etcd.io/etcd/pkg/logutil go.etcd.io/etcd/pkg/netutil -go.etcd.io/etcd/pkg/pathutil -go.etcd.io/etcd/pkg/pbutil go.etcd.io/etcd/pkg/runtime -go.etcd.io/etcd/pkg/schedule go.etcd.io/etcd/pkg/srv -go.etcd.io/etcd/pkg/systemd go.etcd.io/etcd/pkg/tlsutil go.etcd.io/etcd/pkg/transport -go.etcd.io/etcd/pkg/types -go.etcd.io/etcd/pkg/wait +go.etcd.io/etcd/version +go.etcd.io/etcd/wal go.etcd.io/etcd/proxy/grpcproxy/adapter +go.etcd.io/etcd/pkg/systemd go.etcd.io/etcd/raft -go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/auth +go.etcd.io/etcd/etcdserver/api +go.etcd.io/etcd/etcdserver/api/membership +go.etcd.io/etcd/etcdserver/api/snap +go.etcd.io/etcd/etcdserver/api/v2discovery +go.etcd.io/etcd/etcdserver/api/v2http/httptypes +go.etcd.io/etcd/etcdserver/api/v2stats +go.etcd.io/etcd/etcdserver/api/v2store +go.etcd.io/etcd/etcdserver/api/v3alarm +go.etcd.io/etcd/lease +go.etcd.io/etcd/lease/leasehttp +go.etcd.io/etcd/mvcc +go.etcd.io/etcd/mvcc/backend 
+go.etcd.io/etcd/pkg/contention +go.etcd.io/etcd/pkg/fileutil +go.etcd.io/etcd/pkg/idutil +go.etcd.io/etcd/pkg/pbutil +go.etcd.io/etcd/pkg/schedule +go.etcd.io/etcd/pkg/wait go.etcd.io/etcd/raft/raftpb -go.etcd.io/etcd/raft/tracker -go.etcd.io/etcd/version -go.etcd.io/etcd/wal go.etcd.io/etcd/wal/walpb +go.etcd.io/etcd/etcdserver/api/v2error +go.etcd.io/etcd/pkg/ioutil +go.etcd.io/etcd/etcdserver/api/v2auth +go.etcd.io/etcd/clientv3/concurrency +go.etcd.io/etcd/pkg/adt +go.etcd.io/etcd/pkg/cpuutil +go.etcd.io/etcd/pkg/crc +go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/raft/tracker +go.etcd.io/etcd/etcdserver/api/snap/snappb +go.etcd.io/etcd/client +go.etcd.io/etcd/lease/leasepb +go.etcd.io/etcd/pkg/pathutil # go.mongodb.org/mongo-driver v1.1.0 go.mongodb.org/mongo-driver/bson -go.mongodb.org/mongo-driver/bson/bsoncodec -go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive +go.mongodb.org/mongo-driver/bson/bsoncodec +go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/x/bsonx/bsoncore # go.opencensus.io v0.22.2 -go.opencensus.io -go.opencensus.io/internal -go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricdata -go.opencensus.io/metric/metricproducer -go.opencensus.io/plugin/ocgrpc go.opencensus.io/plugin/ochttp +go.opencensus.io/trace +go.opencensus.io/plugin/ocgrpc go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io/resource go.opencensus.io/stats -go.opencensus.io/stats/internal go.opencensus.io/stats/view go.opencensus.io/tag -go.opencensus.io/trace -go.opencensus.io/trace/internal go.opencensus.io/trace/propagation +go.opencensus.io/internal +go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate +go.opencensus.io/metric/metricdata +go.opencensus.io/stats/internal +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricproducer +go.opencensus.io +go.opencensus.io/resource # go.uber.org/atomic v1.5.0 go.uber.org/atomic # 
go.uber.org/multierr v1.1.0 go.uber.org/multierr # go.uber.org/zap v1.10.0 go.uber.org/zap -go.uber.org/zap/buffer +go.uber.org/zap/zapcore go.uber.org/zap/internal/bufferpool +go.uber.org/zap/buffer go.uber.org/zap/internal/color go.uber.org/zap/internal/exit -go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 -golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt -golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/ed25519 +golang.org/x/crypto/argon2 golang.org/x/crypto/ed25519/internal/edwards25519 +golang.org/x/crypto/blake2b golang.org/x/crypto/ssh/terminal # golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 -golang.org/x/exp/apidiff golang.org/x/exp/cmd/apidiff +golang.org/x/exp/apidiff # golang.org/x/lint v0.0.0-20190930215403-16217165b5de -golang.org/x/lint golang.org/x/lint/golint +golang.org/x/lint # golang.org/x/net v0.0.0-20191112182307-2180aed22343 -golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp -golang.org/x/net/http/httpguts -golang.org/x/net/http/httpproxy +golang.org/x/net/netutil +golang.org/x/net/trace +golang.org/x/net/internal/timeseries golang.org/x/net/http2 golang.org/x/net/http2/hpack -golang.org/x/net/idna -golang.org/x/net/internal/iana -golang.org/x/net/internal/socket -golang.org/x/net/internal/timeseries golang.org/x/net/ipv4 golang.org/x/net/ipv6 -golang.org/x/net/netutil +golang.org/x/net/http/httpguts golang.org/x/net/publicsuffix -golang.org/x/net/trace +golang.org/x/net/idna +golang.org/x/net/http/httpproxy +golang.org/x/net/bpf +golang.org/x/net/internal/iana +golang.org/x/net/internal/socket # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/google @@ -744,104 +786,94 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 -golang.org/x/sys/cpu -golang.org/x/sys/unix golang.org/x/sys/windows 
+golang.org/x/sys/unix golang.org/x/sys/windows/registry +golang.org/x/sys/cpu # golang.org/x/text v0.3.2 -golang.org/x/text/secure/bidirule golang.org/x/text/transform -golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm +golang.org/x/text/secure/bidirule +golang.org/x/text/unicode/bidi golang.org/x/text/width # golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time/rate # golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 golang.org/x/tools/cmd/goimports -golang.org/x/tools/go/analysis -golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/ast/astutil -golang.org/x/tools/go/ast/inspector -golang.org/x/tools/go/buildutil golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/packages +golang.org/x/tools/internal/imports +golang.org/x/tools/go/analysis golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/go/packages -golang.org/x/tools/go/types/objectpath -golang.org/x/tools/go/types/typeutil -golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gopathwalk -golang.org/x/tools/internal/imports -golang.org/x/tools/internal/module golang.org/x/tools/internal/semver golang.org/x/tools/internal/span +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/internal/module +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/buildutil +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/go/types/typeutil +golang.org/x/tools/internal/fastwalk # google.golang.org/api v0.14.0 +google.golang.org/api/option +google.golang.org/api/transport/http google.golang.org/api/cloudresourcemanager/v1 -google.golang.org/api/compute/v1 +google.golang.org/api/iterator +google.golang.org/api/transport/grpc google.golang.org/api/googleapi -google.golang.org/api/googleapi/transport +google.golang.org/api/storage/v1 google.golang.org/api/internal +google.golang.org/api/googleapi/transport 
+google.golang.org/api/transport/http/internal/propagation +google.golang.org/api/transport google.golang.org/api/internal/gensupport google.golang.org/api/internal/third_party/uritemplates -google.golang.org/api/iterator -google.golang.org/api/option -google.golang.org/api/storage/v1 -google.golang.org/api/transport -google.golang.org/api/transport/grpc -google.golang.org/api/transport/http -google.golang.org/api/transport/http/internal/propagation +google.golang.org/api/compute/v1 # google.golang.org/appengine v1.6.5 +google.golang.org/appengine/urlfetch google.golang.org/appengine +google.golang.org/appengine/socket google.golang.org/appengine/internal +google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/app_identity +google.golang.org/appengine/internal/modules +google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log -google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket -google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket -google.golang.org/appengine/urlfetch # google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 -google.golang.org/genproto/googleapis/api/annotations -google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 +google.golang.org/genproto/protobuf/field_mask google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/longrunning google.golang.org/genproto/googleapis/rpc/code -google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/protobuf/field_mask 
+google.golang.org/genproto/googleapis/api/httpbody # google.golang.org/grpc v1.25.1 google.golang.org/grpc +google.golang.org/grpc/codes +google.golang.org/grpc/status +google.golang.org/grpc/health/grpc_health_v1 +google.golang.org/grpc/encoding/gzip +google.golang.org/grpc/metadata +google.golang.org/grpc/naming +google.golang.org/grpc/test/bufconn google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb -google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/roundrobin -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/alts -google.golang.org/grpc/credentials/alts/internal -google.golang.org/grpc/credentials/alts/internal/authinfo -google.golang.org/grpc/credentials/alts/internal/conn -google.golang.org/grpc/credentials/alts/internal/handshaker -google.golang.org/grpc/credentials/alts/internal/handshaker/service -google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp -google.golang.org/grpc/credentials/google -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding -google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog -google.golang.org/grpc/health -google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancerload @@ -853,19 +885,30 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough -google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive -google.golang.org/grpc/metadata -google.golang.org/grpc/naming 
google.golang.org/grpc/peer google.golang.org/grpc/resolver -google.golang.org/grpc/resolver/dns -google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats -google.golang.org/grpc/status google.golang.org/grpc/tap +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/balancer/grpclb +google.golang.org/grpc/credentials/google +google.golang.org/grpc/credentials/oauth +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/health +google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 +google.golang.org/grpc/credentials/alts +google.golang.org/grpc/credentials/alts/internal +google.golang.org/grpc/credentials/alts/internal/handshaker +google.golang.org/grpc/credentials/alts/internal/handshaker/service +google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp +google.golang.org/grpc/credentials/alts/internal/authinfo +google.golang.org/grpc/credentials/alts/internal/conn # gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 # gopkg.in/fsnotify/fsnotify.v1 v1.4.7 @@ -877,42 +920,44 @@ gopkg.in/ini.v1 # gopkg.in/yaml.v2 v2.2.5 gopkg.in/yaml.v2 # honnef.co/go/tools v0.0.1-2019.2.3 -honnef.co/go/tools/arg honnef.co/go/tools/cmd/staticcheck +honnef.co/go/tools/lint +honnef.co/go/tools/lint/lintutil +honnef.co/go/tools/simple +honnef.co/go/tools/staticcheck +honnef.co/go/tools/stylecheck +honnef.co/go/tools/unused honnef.co/go/tools/config -honnef.co/go/tools/deprecated honnef.co/go/tools/facts -honnef.co/go/tools/functions -honnef.co/go/tools/go/types/typeutil honnef.co/go/tools/internal/cache +honnef.co/go/tools/loader +honnef.co/go/tools/lint/lintutil/format +honnef.co/go/tools/version +honnef.co/go/tools/arg honnef.co/go/tools/internal/passes/buildssa -honnef.co/go/tools/internal/renameio honnef.co/go/tools/internal/sharedcheck 
-honnef.co/go/tools/lint honnef.co/go/tools/lint/lintdsl -honnef.co/go/tools/lint/lintutil -honnef.co/go/tools/lint/lintutil/format -honnef.co/go/tools/loader +honnef.co/go/tools/deprecated +honnef.co/go/tools/functions honnef.co/go/tools/printf -honnef.co/go/tools/simple honnef.co/go/tools/ssa honnef.co/go/tools/ssautil -honnef.co/go/tools/staticcheck honnef.co/go/tools/staticcheck/vrp -honnef.co/go/tools/stylecheck -honnef.co/go/tools/unused -honnef.co/go/tools/version +honnef.co/go/tools/go/types/typeutil +honnef.co/go/tools/internal/renameio # k8s.io/api v0.0.0-20191115095533-47f6de673b26 +k8s.io/api/core/v1 +k8s.io/api/extensions/v1beta1 +k8s.io/api/apps/v1beta1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 -k8s.io/api/apps/v1beta1 +k8s.io/api/autoscaling/v1 k8s.io/api/apps/v1beta2 k8s.io/api/auditregistration/v1alpha1 k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 -k8s.io/api/autoscaling/v1 k8s.io/api/autoscaling/v2beta1 k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 @@ -921,14 +966,12 @@ k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 -k8s.io/api/core/v1 +k8s.io/api/policy/v1beta1 k8s.io/api/events/v1beta1 -k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1alpha1 k8s.io/api/node/v1beta1 -k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 @@ -940,49 +983,52 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 # k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2 -k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/api/meta -k8s.io/apimachinery/pkg/api/resource -k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/apis/meta/v1beta1 
+k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/conversion -k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields k8s.io/apimachinery/pkg/labels -k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/runtime/schema -k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/runtime/serializer/json -k8s.io/apimachinery/pkg/runtime/serializer/protobuf -k8s.io/apimachinery/pkg/runtime/serializer/recognizer -k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/clock -k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/util/errors -k8s.io/apimachinery/pkg/util/framer -k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/api/errors +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/clock +k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/util/validation/field -k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf 
+k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/versioning +k8s.io/apimachinery/pkg/apis/meta/v1beta1 +k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/yaml -k8s.io/apimachinery/pkg/version -k8s.io/apimachinery/pkg/watch -k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured # k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.0.0-20190620085101-78d2af792bab -k8s.io/client-go/discovery k8s.io/client-go/kubernetes -k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/rest +k8s.io/client-go/tools/cache +k8s.io/client-go/tools/metrics +k8s.io/client-go/util/workqueue +k8s.io/client-go/discovery k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -1019,31 +1065,28 @@ k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/pkg/apis/clientauthentication -k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 -k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/util/flowcontrol k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec -k8s.io/client-go/rest k8s.io/client-go/rest/watch -k8s.io/client-go/tools/cache k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/tools/metrics -k8s.io/client-go/tools/pager -k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert +k8s.io/client-go/tools/pager +k8s.io/client-go/util/retry +k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/tools/reference +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/util/connrotation -k8s.io/client-go/util/flowcontrol 
k8s.io/client-go/util/keyutil -k8s.io/client-go/util/retry -k8s.io/client-go/util/workqueue # k8s.io/klog v1.0.0 k8s.io/klog # k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 k8s.io/utils/buffer -k8s.io/utils/integer k8s.io/utils/trace +k8s.io/utils/integer # rsc.io/binaryregexp v0.2.0 rsc.io/binaryregexp rsc.io/binaryregexp/syntax From 3a81bc28d4b3cc780d9769689cdeb0a5e2276084 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 1 Feb 2020 17:02:05 +0000 Subject: [PATCH 07/20] Add integration test using older image Here we test pushing from the new distributor to an older ingester, and hand-over from the old ingester to a new ingester. The backwards-compatibility version is pinned at v0.6.0. Signed-off-by: Bryan Boreham --- integration-tests/test-backcompat.sh | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100755 integration-tests/test-backcompat.sh diff --git a/integration-tests/test-backcompat.sh b/integration-tests/test-backcompat.sh new file mode 100755 index 0000000000..468b75a443 --- /dev/null +++ b/integration-tests/test-backcompat.sh @@ -0,0 +1,34 @@ +#! /bin/sh + +set -e + +BACKWARD_IMAGE=v0.6.0 + +. 
"$(dirname "$0")/common.sh" + +"$(dirname "$0")/start-components.sh" + +# Note we haven't created any tables in the DB yet - we aren't going to run long enough to flush anything + +echo Start first ingester, using older version +docker run $RUN_ARGS -d --name=i1 --hostname=i1 "$IMAGE_PREFIX"cortex:$BACKWARD_IMAGE -target=ingester $INGESTER_ARGS -ingester.claim-on-rollout=true + +I1_ADDR=$(container_ip i1) +wait_for "curl -s -f -m 3 $I1_ADDR/ready" "ingester ready" + +has_tokens_owned() { + curl -s -f -m 3 $1/metrics | grep -q 'cortex_ring_tokens_owned' +} +DIST_ADDR=$(container_ip distributor) +wait_for "has_tokens_owned $DIST_ADDR" "distributor to see ingester in ring" + +docker run $RUN_ARGS --rm $AVALANCHE_IMAGE --metric-count=2 --label-count=2 --series-count=2 --remote-requests-count=1 --remote-url=http://distributor/api/prom/push + +echo Start second ingester, waiting for first +docker run $RUN_ARGS -d --name=i2 --hostname=i2 "$IMAGE_PREFIX"cortex:$IMAGE_TAG -target=ingester $INGESTER_ARGS -ingester.join-after=300s -ingester.claim-on-rollout=true + +echo Stop first ingester so it should hand over +docker stop i1 + +I2_ADDR=$(container_ip i2) +wait_for "curl -s -f -m 3 $I2_ADDR/ready" "ingester ready" From b7ad21438486c041abd394273ea0db86f76be6a0 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Sat, 1 Feb 2020 15:10:27 -0500 Subject: [PATCH 08/20] Removes proto compat go tests as everything looks good. 
Signed-off-by: Cyril Tovena go mod check Signed-off-by: Cyril Tovena update circle-ci image Signed-off-by: Cyril Tovena Rebased to master Signed-off-by: Cyril Tovena --- .circleci/config.yml | 2 +- docs/configuration/config-file-reference.md | 188 + go.sum | 2 - integration-tests/proto/1.2.1/cortex.pb.go | 7706 ------------ integration-tests/proto/1.2.1/timeseries.go | 256 - integration-tests/proto/1.3.0/cortex.pb.go | 7967 ------------ integration-tests/proto/1.3.0/timeseries.go | 273 - integration-tests/proto/marshal_test.go | 64 - integration-tests/proto/proto_test.go | 437 - pkg/ingester/client/timeseries.go | 6 +- vendor/github.com/google/btree/btree_mem.go | 76 - vendor/github.com/lib/pq/oid/gen.go | 93 - .../miekg/dns/duplicate_generate.go | 144 - vendor/github.com/miekg/dns/msg_generate.go | 328 - vendor/github.com/miekg/dns/types_generate.go | 285 - .../minio/minio-go/v6/functional_tests.go | 10247 ---------------- .../alertmanager/asset/asset_generate.go | 38 - .../weaveworks/common/server/server.go | 29 +- vendor/golang.org/x/net/internal/iana/gen.go | 383 - .../x/net/internal/socket/defs_aix.go | 38 - .../x/net/internal/socket/defs_darwin.go | 36 - .../x/net/internal/socket/defs_dragonfly.go | 36 - .../x/net/internal/socket/defs_freebsd.go | 36 - .../x/net/internal/socket/defs_linux.go | 40 - .../x/net/internal/socket/defs_netbsd.go | 38 - .../x/net/internal/socket/defs_openbsd.go | 36 - .../x/net/internal/socket/defs_solaris.go | 36 - vendor/golang.org/x/net/ipv4/defs_aix.go | 39 - vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 - .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 - vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 - vendor/golang.org/x/net/ipv4/defs_linux.go | 122 - vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 - vendor/golang.org/x/net/ipv4/gen.go | 199 - vendor/golang.org/x/net/ipv6/defs_aix.go | 82 - 
vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 - .../golang.org/x/net/ipv6/defs_dragonfly.go | 82 - vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 - vendor/golang.org/x/net/ipv6/defs_linux.go | 147 - vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 - vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 - vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 - vendor/golang.org/x/net/ipv6/gen.go | 199 - vendor/golang.org/x/net/publicsuffix/gen.go | 717 -- vendor/golang.org/x/sys/unix/mkasm_darwin.go | 78 - vendor/golang.org/x/sys/unix/mkpost.go | 122 - vendor/golang.org/x/sys/unix/mksyscall.go | 402 - .../x/sys/unix/mksyscall_aix_ppc.go | 415 - .../x/sys/unix/mksyscall_aix_ppc64.go | 614 - .../x/sys/unix/mksyscall_solaris.go | 335 - .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 - vendor/golang.org/x/sys/unix/mksysnum.go | 190 - vendor/golang.org/x/sys/unix/types_aix.go | 237 - vendor/golang.org/x/sys/unix/types_darwin.go | 283 - .../golang.org/x/sys/unix/types_dragonfly.go | 263 - vendor/golang.org/x/sys/unix/types_freebsd.go | 400 - vendor/golang.org/x/sys/unix/types_netbsd.go | 290 - vendor/golang.org/x/sys/unix/types_openbsd.go | 283 - vendor/golang.org/x/sys/unix/types_solaris.go | 266 - vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - .../x/text/unicode/norm/maketables.go | 986 -- .../golang.org/x/text/unicode/norm/triegen.go | 117 - vendor/golang.org/x/text/width/gen.go | 115 - vendor/golang.org/x/text/width/gen_common.go | 96 - vendor/golang.org/x/text/width/gen_trieval.go | 34 - .../x/tools/go/gcexportdata/main.go | 99 - .../x/tools/internal/imports/mkindex.go | 173 - .../x/tools/internal/imports/mkstdlib.go | 128 - .../grpc/test/bufconn/bufconn.go | 308 - vendor/modules.txt | 646 +- 74 files changed, 517 insertions(+), 38227 deletions(-) delete mode 100644 integration-tests/proto/1.2.1/cortex.pb.go delete mode 100644 
integration-tests/proto/1.2.1/timeseries.go delete mode 100644 integration-tests/proto/1.3.0/cortex.pb.go delete mode 100644 integration-tests/proto/1.3.0/timeseries.go delete mode 100644 integration-tests/proto/marshal_test.go delete mode 100644 integration-tests/proto/proto_test.go delete mode 100644 vendor/github.com/google/btree/btree_mem.go delete mode 100644 vendor/github.com/lib/pq/oid/gen.go delete mode 100644 vendor/github.com/miekg/dns/duplicate_generate.go delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/miekg/dns/types_generate.go delete mode 100644 vendor/github.com/minio/minio-go/v6/functional_tests.go delete mode 100644 vendor/github.com/prometheus/alertmanager/asset/asset_generate.go delete mode 100644 vendor/golang.org/x/net/internal/iana/gen.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_aix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_aix.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/gen.go delete mode 100644 
vendor/golang.org/x/net/ipv6/defs_aix.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/gen.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go delete mode 100644 vendor/golang.org/x/text/width/gen.go delete mode 100644 
vendor/golang.org/x/text/width/gen_common.go delete mode 100644 vendor/golang.org/x/text/width/gen_trieval.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/mkindex.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/mkstdlib.go delete mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 161d1b7b10..ea08a1e0dd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 # https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/ defaults: &defaults docker: - - image: quay.io/cortexproject/build-image:validate-k8s-specs-7c217ee7 + - image: ctovena/cortex-build:update-gogoproto-2294034c working_directory: /go/src/github.com/cortexproject/cortex workflows: diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 1df44c6fd2..8a501dbc1e 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -86,6 +86,14 @@ Supported contents and default values of the config file: # and used by the 'configs' service to expose APIs to manage them. [configdb: ] +<<<<<<< HEAD +======= +# The configstore_config configures the config database storing rules and +# alerts, and is used by the Cortex alertmanager. +# The CLI flags prefix for this block config is: alertmanager +[config_store: ] + +>>>>>>> Removes proto compat go tests as everything looks good. # The alertmanager_config configures the Cortex alertmanager. [alertmanager: ] @@ -97,9 +105,12 @@ runtime_config: # File with the configuration that can be updated in runtime. # CLI flag: -runtime-config.file [file: | default = ""] +<<<<<<< HEAD # The memberlist_config configures the Gossip memberlist. [memberlist: ] +======= +>>>>>>> Removes proto compat go tests as everything looks good. 
``` ## `server_config` @@ -163,6 +174,7 @@ The `server_config` configures the HTTP and gRPC server of the launched service( # CLI flag: -server.grpc-max-concurrent-streams [grpc_server_max_concurrent_streams: | default = 100] +<<<<<<< HEAD # The duration after which an idle connection should be closed. Default: # infinity # CLI flag: -server.grpc.keepalive.max-connection-idle @@ -188,6 +200,8 @@ The `server_config` configures the HTTP and gRPC server of the launched service( # CLI flag: -server.grpc.keepalive.timeout [grpc_server_keepalive_timeout: | default = 20s] +======= +>>>>>>> Removes proto compat go tests as everything looks good. # Only log messages with the given severity or above. Valid levels: [debug, # info, warn, error] # CLI flag: -log.level @@ -269,6 +283,13 @@ ha_tracker: # The CLI flags prefix for this block config is: distributor.ha-tracker [etcd: ] +<<<<<<< HEAD +======= + # The memberlist_config configures the Gossip memberlist. + # The CLI flags prefix for this block config is: distributor.ha-tracker + [memberlist: ] + +>>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. # CLI flag: -distributor.ha-tracker.multi.primary @@ -322,6 +343,13 @@ ring: # The CLI flags prefix for this block config is: distributor.ring [etcd: ] +<<<<<<< HEAD +======= + # The memberlist_config configures the Gossip memberlist. + # The CLI flags prefix for this block config is: distributor.ring + [memberlist: ] + +>>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. # CLI flag: -distributor.ring.multi.primary @@ -393,6 +421,12 @@ lifecycler: # The etcd_config configures the etcd client. [etcd: ] +<<<<<<< HEAD +======= + # The memberlist_config configures the Gossip memberlist. + [memberlist: ] + +>>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. 
# CLI flag: -multi.primary @@ -599,7 +633,11 @@ results_cache: # The default validity of entries for caches unless overridden. # CLI flag: -frontend.default-validity +<<<<<<< HEAD [default_validity: | default = 0s] +======= + [defaul_validity: | default = 0s] +>>>>>>> Removes proto compat go tests as everything looks good. background: # How many goroutines to use to write back to memcache. @@ -648,9 +686,16 @@ results_cache: The `ruler_config` configures the Cortex ruler. ```yaml +<<<<<<< HEAD # URL of alerts return path. # CLI flag: -ruler.external.url [externalurl: | default = ] +======= +externalurl: + # URL of alerts return path. + # CLI flag: -ruler.external.url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. # How frequently to evaluate rules # CLI flag: -ruler.evaluation-interval @@ -674,9 +719,16 @@ storeconfig: # CLI flag: -ruler.rule-path [rulepath: | default = "/rules"] +<<<<<<< HEAD # URL of the Alertmanager to send notifications to. # CLI flag: -ruler.alertmanager-url [alertmanagerurl: | default = ] +======= +alertmanagerurl: + # URL of the Alertmanager to send notifications to. + # CLI flag: -ruler.alertmanager-url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. # Use DNS SRV records to discover alertmanager hosts. # CLI flag: -ruler.alertmanager-discovery @@ -725,6 +777,13 @@ ring: # The CLI flags prefix for this block config is: ruler.ring [etcd: ] +<<<<<<< HEAD +======= + # The memberlist_config configures the Gossip memberlist. + # The CLI flags prefix for this block config is: ruler.ring + [memberlist: ] + +>>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. # CLI flag: -ruler.ring.multi.primary @@ -758,10 +817,13 @@ ring: # Period with which to attempt to flush rule groups. 
# CLI flag: -ruler.flush-period [flushcheckperiod: | default = 1m0s] +<<<<<<< HEAD # Enable the ruler api # CLI flag: -experimental.ruler.enable-api [enable_api: | default = false] +======= +>>>>>>> Removes proto compat go tests as everything looks good. ``` ## `alertmanager_config` @@ -777,6 +839,7 @@ The `alertmanager_config` configures the Cortex alertmanager. # CLI flag: -alertmanager.storage.retention [retention: | default = 120h0m0s] +<<<<<<< HEAD # The URL under which Alertmanager is externally reachable (for example, if # Alertmanager is served via a reverse proxy). Used for generating relative and # absolute links back to Alertmanager itself. If the URL has a path portion, it @@ -784,6 +847,17 @@ The `alertmanager_config` configures the Cortex alertmanager. # relevant URL components will be derived automatically. # CLI flag: -alertmanager.web.external-url [externalurl: | default = ] +======= +externalurl: + # The URL under which Alertmanager is externally reachable (for example, if + # Alertmanager is served via a reverse proxy). Used for generating relative + # and absolute links back to Alertmanager itself. If the URL has a path + # portion, it will be used to prefix all HTTP endpoints served by + # Alertmanager. If omitted, relevant URL components will be derived + # automatically. + # CLI flag: -alertmanager.web.external-url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. # How frequently to poll Cortex configs # CLI flag: -alertmanager.configs.poll-interval @@ -812,6 +886,7 @@ The `alertmanager_config` configures the Cortex alertmanager. # Root of URL to generate if config is http://internal.monitor # CLI flag: -alertmanager.configs.auto-webhook-root [autowebhookroot: | default = ""] +<<<<<<< HEAD store: # Type of backend to use to store alertmanager configs. Supported values are: @@ -828,6 +903,8 @@ store: # Path at which alertmanager configurations are stored. 
# CLI flag: -alertmanager.storage.local.path [path: | default = ""] +======= +>>>>>>> Removes proto compat go tests as everything looks good. ``` ## `table_manager_config` @@ -1172,11 +1249,20 @@ The `storage_config` configures where Cortex stores the data (chunks storage eng aws: dynamodbconfig: +<<<<<<< HEAD # DynamoDB endpoint URL with escaped Key and Secret encoded. If only region # is specified as a host, proper endpoint will be deduced. Use # inmemory:/// to use a mock in-memory implementation. # CLI flag: -dynamodb.url [dynamodb: | default = ] +======= + dynamodb: + # DynamoDB endpoint URL with escaped Key and Secret encoded. If only + # region is specified as a host, proper endpoint will be deduced. Use + # inmemory:/// to use a mock in-memory implementation. + # CLI flag: -dynamodb.url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. # DynamoDB table management requests per second limit. # CLI flag: -dynamodb.api-limit @@ -1186,9 +1272,16 @@ aws: # CLI flag: -dynamodb.throttle-limit [throttlelimit: | default = 10] +<<<<<<< HEAD # ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded. # CLI flag: -applicationautoscaling.url [applicationautoscaling: | default = ] +======= + applicationautoscaling: + # ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded. + # CLI flag: -applicationautoscaling.url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. metrics: # Use metrics-based autoscaling, via this query URL @@ -1236,11 +1329,20 @@ aws: # CLI flag: -dynamodb.chunk.get.max.parallelism [chunkgetmaxparallelism: | default = 32] +<<<<<<< HEAD # S3 endpoint URL with escaped Key and Secret encoded. If only region is # specified as a host, proper endpoint will be deduced. Use # inmemory:/// to use a mock in-memory implementation. # CLI flag: -s3.url [s3: | default = ] +======= + s3: + # S3 endpoint URL with escaped Key and Secret encoded. 
If only region is + # specified as a host, proper endpoint will be deduced. Use + # inmemory:/// to use a mock in-memory implementation. + # CLI flag: -s3.url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. # Comma separated list of bucket names to evenly distribute chunks over. # Overrides any buckets specified in s3.url flag @@ -1413,6 +1515,7 @@ cassandra: # CLI flag: -cassandra.password [password: | default = ""] +<<<<<<< HEAD # File containing password to use when connecting to cassandra. # CLI flag: -cassandra.password-file [password_file: | default = ""] @@ -1429,6 +1532,15 @@ cassandra: # Initial connection timeout, used during initial dial to server. # CLI flag: -cassandra.connect-timeout [connect_timeout: | default = 5s] +======= + # Timeout when connecting to cassandra. + # CLI flag: -cassandra.timeout + [timeout: | default = 600ms] + + # Initial connection timeout, used during initial dial to server. + # CLI flag: -cassandra.connect-timeout + [connect_timeout: | default = 600ms] +>>>>>>> Removes proto compat go tests as everything looks good. # Number of retries to perform on a request. (Default is 0: no retries) # CLI flag: -cassandra.max-retries @@ -1465,7 +1577,11 @@ index_queries_cache_config: # Cache config for index entry reading. The default validity of entries for # caches unless overridden. # CLI flag: -store.index-cache-read.default-validity +<<<<<<< HEAD [default_validity: | default = 0s] +======= + [defaul_validity: | default = 0s] +>>>>>>> Removes proto compat go tests as everything looks good. background: # Cache config for index entry reading. How many goroutines to use to write @@ -1510,7 +1626,11 @@ chunk_cache_config: # Cache config for chunks. The default validity of entries for caches unless # overridden. # CLI flag: -default-validity +<<<<<<< HEAD [default_validity: | default = 0s] +======= + [defaul_validity: | default = 0s] +>>>>>>> Removes proto compat go tests as everything looks good. 
background: # Cache config for chunks. How many goroutines to use to write back to @@ -1545,7 +1665,11 @@ write_dedupe_cache_config: # Cache config for index entry writing. The default validity of entries for # caches unless overridden. # CLI flag: -store.index-cache-write.default-validity +<<<<<<< HEAD [default_validity: | default = 0s] +======= + [defaul_validity: | default = 0s] +>>>>>>> Removes proto compat go tests as everything looks good. background: # Cache config for index entry writing. How many goroutines to use to write @@ -1740,11 +1864,16 @@ The `memberlist_config` configures the Gossip memberlist. ```yaml # Name of the node in memberlist cluster. Defaults to hostname. +<<<<<<< HEAD # CLI flag: -memberlist.nodename +======= +# CLI flag: -.memberlist.nodename +>>>>>>> Removes proto compat go tests as everything looks good. [node_name: | default = ""] # The timeout for establishing a connection with a remote node, and for # read/write operations. Uses memberlist LAN defaults if 0. +<<<<<<< HEAD # CLI flag: -memberlist.stream-timeout [stream_timeout: | default = 0s] @@ -1789,10 +1918,47 @@ The `memberlist_config` configures the Gossip memberlist. # Timeout for leaving memberlist cluster. # CLI flag: -memberlist.leave-timeout +======= +# CLI flag: -.memberlist.stream-timeout +[stream_timeout: | default = 0s] + +# Multiplication factor used when sending out messages (factor * log(N+1)). +# CLI flag: -.memberlist.retransmit-factor +[retransmit_factor: | default = 0] + +# How often to use pull/push sync. Uses memberlist LAN defaults if 0. +# CLI flag: -.memberlist.pullpush-interval +[pull_push_interval: | default = 0s] + +# How often to gossip. Uses memberlist LAN defaults if 0. +# CLI flag: -.memberlist.gossip-interval +[gossip_interval: | default = 0s] + +# How many nodes to gossip to. Uses memberlist LAN defaults if 0. +# CLI flag: -.memberlist.gossip-nodes +[gossip_nodes: | default = 0] + +# Other cluster members to join. Can be specified multiple times. 
Memberlist +# store is EXPERIMENTAL. +# CLI flag: -.memberlist.join +[join_members: | default = ] + +# If this node fails to join memberlist cluster, abort. +# CLI flag: -.memberlist.abort-if-join-fails +[abort_if_cluster_join_fails: | default = true] + +# How long to keep LEFT ingesters in the ring. +# CLI flag: -.memberlist.left-ingesters-timeout +[left_ingesters_timeout: | default = 5m0s] + +# Timeout for leaving memberlist cluster. +# CLI flag: -.memberlist.leave-timeout +>>>>>>> Removes proto compat go tests as everything looks good. [leave_timeout: | default = 5s] # IP address to listen on for gossip messages. Multiple addresses may be # specified. Defaults to 0.0.0.0 +<<<<<<< HEAD # CLI flag: -memberlist.bind-addr [bind_addr: | default = ] @@ -1806,6 +1972,21 @@ The `memberlist_config` configures the Gossip memberlist. # Timeout for writing 'packet' data. # CLI flag: -memberlist.packet-write-timeout +======= +# CLI flag: -.memberlist.bind-addr +[bind_addr: | default = ] + +# Port to listen on for gossip messages. +# CLI flag: -.memberlist.bind-port +[bind_port: | default = 7946] + +# Timeout used when connecting to other nodes to send packet. +# CLI flag: -.memberlist.packet-dial-timeout +[packet_dial_timeout: | default = 5s] + +# Timeout for writing 'packet' data. +# CLI flag: -.memberlist.packet-write-timeout +>>>>>>> Removes proto compat go tests as everything looks good. [packet_write_timeout: | default = 5s] ``` @@ -2060,9 +2241,16 @@ The `configdb_config` configures the config database storing rules and alerts, a The `configstore_config` configures the config database storing rules and alerts, and is used by the Cortex alertmanager. ```yaml +<<<<<<< HEAD # URL of configs API server. # CLI flag: -.configs.url [configsapiurl: | default = ] +======= +configsapiurl: + # URL of configs API server. + # CLI flag: -.configs.url + [url: | default = ] +>>>>>>> Removes proto compat go tests as everything looks good. 
# Timeout for requests to Weave Cloud configs service. # CLI flag: -.configs.client-timeout diff --git a/go.sum b/go.sum index d4549234e8..bcad9854dc 100644 --- a/go.sum +++ b/go.sum @@ -776,8 +776,6 @@ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 h1:qi+YkNiB7T3Ikw1DoDIFhdAPbDU7fUPDsKrUoZdupnQ= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1/go.mod h1:7gGdEUJaCrSlWi/mjd68CZv0sfqektYPDcro9cE+M9k= -github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 h1:nhoCrldzSm1le34sZfSNyTELYxIDaAmDw6PPVoEj5Mw= -github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1/go.mod h1:KLGX4H1D0lPVfHS/hqO9yrKvDzaT0bqYftIW43iLaOc= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= diff --git a/integration-tests/proto/1.2.1/cortex.pb.go b/integration-tests/proto/1.2.1/cortex.pb.go deleted file mode 100644 index 08b63a9016..0000000000 --- a/integration-tests/proto/1.2.1/cortex.pb.go +++ /dev/null @@ -1,7706 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cortex.proto - -package client - -import ( - bytes "bytes" - context "context" - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - io "io" - math "math" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type MatchType int32 - -const ( - EQUAL MatchType = 0 - NOT_EQUAL MatchType = 1 - REGEX_MATCH MatchType = 2 - REGEX_NO_MATCH MatchType = 3 -) - -var MatchType_name = map[int32]string{ - 0: "EQUAL", - 1: "NOT_EQUAL", - 2: "REGEX_MATCH", - 3: "REGEX_NO_MATCH", -} - -var MatchType_value = map[string]int32{ - "EQUAL": 0, - "NOT_EQUAL": 1, - "REGEX_MATCH": 2, - "REGEX_NO_MATCH": 3, -} - -func (MatchType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0} -} - -type WriteRequest_SourceEnum int32 - -const ( - API WriteRequest_SourceEnum = 0 - RULE WriteRequest_SourceEnum = 1 -) - -var WriteRequest_SourceEnum_name = map[int32]string{ - 0: "API", - 1: "RULE", -} - -var WriteRequest_SourceEnum_value = map[string]int32{ - "API": 0, - "RULE": 1, -} - -func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0, 0} -} - -type WriteRequest struct { - Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` - Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` -} - -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0} -} -func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if 
deterministic { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(m, src) -} -func (m *WriteRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteRequest proto.InternalMessageInfo - -func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { - if m != nil { - return m.Source - } - return API -} - -type WriteResponse struct { -} - -func (m *WriteResponse) Reset() { *m = WriteResponse{} } -func (*WriteResponse) ProtoMessage() {} -func (*WriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{1} -} -func (m *WriteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteResponse.Merge(m, src) -} -func (m *WriteResponse) XXX_Size() int { - return m.Size() -} -func (m *WriteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WriteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteResponse proto.InternalMessageInfo - -type ReadRequest struct { - Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` -} - -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{2} -} -func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(m, src) -} -func (m *ReadRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadRequest proto.InternalMessageInfo - -func (m *ReadRequest) GetQueries() []*QueryRequest { - if m != nil { - return m.Queries - } - return nil -} - -type ReadResponse struct { - Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` -} - -func (m *ReadResponse) Reset() { *m = ReadResponse{} } -func (*ReadResponse) ProtoMessage() {} -func (*ReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{3} -} -func (m *ReadResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResponse.Merge(m, src) -} -func (m *ReadResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResponse proto.InternalMessageInfo - -func (m *ReadResponse) GetResults() []*QueryResponse { - if m != nil { - return m.Results - } - return nil -} - -type QueryRequest struct { - StartTimestampMs int64 
`protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *QueryRequest) Reset() { *m = QueryRequest{} } -func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{4} -} -func (m *QueryRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRequest.Merge(m, src) -} -func (m *QueryRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRequest proto.InternalMessageInfo - -func (m *QueryRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *QueryRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *QueryRequest) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -type QueryResponse struct { - Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *QueryResponse) Reset() { *m = QueryResponse{} } -func (*QueryResponse) ProtoMessage() {} -func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{5} -} -func (m *QueryResponse) XXX_Unmarshal(b []byte) error { - 
return m.Unmarshal(b) -} -func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResponse.Merge(m, src) -} -func (m *QueryResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResponse proto.InternalMessageInfo - -func (m *QueryResponse) GetTimeseries() []TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -// QueryStreamResponse contains a batch of timeseries chunks. -type QueryStreamResponse struct { - Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } -func (*QueryStreamResponse) ProtoMessage() {} -func (*QueryStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{6} -} -func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryStreamResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryStreamResponse.Merge(m, src) -} -func (m *QueryStreamResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryStreamResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryStreamResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryStreamResponse proto.InternalMessageInfo - -func (m 
*QueryStreamResponse) GetTimeseries() []TimeSeriesChunk { - if m != nil { - return m.Timeseries - } - return nil -} - -type LabelValuesRequest struct { - LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` -} - -func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } -func (*LabelValuesRequest) ProtoMessage() {} -func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{7} -} -func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesRequest.Merge(m, src) -} -func (m *LabelValuesRequest) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo - -func (m *LabelValuesRequest) GetLabelName() string { - if m != nil { - return m.LabelName - } - return "" -} - -type LabelValuesResponse struct { - LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` -} - -func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } -func (*LabelValuesResponse) ProtoMessage() {} -func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{8} -} -func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesResponse.Merge(m, src) -} -func (m *LabelValuesResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo - -func (m *LabelValuesResponse) GetLabelValues() []string { - if m != nil { - return m.LabelValues - } - return nil -} - -type LabelNamesRequest struct { -} - -func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } -func (*LabelNamesRequest) ProtoMessage() {} -func (*LabelNamesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{9} -} -func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesRequest.Merge(m, src) -} -func (m *LabelNamesRequest) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo - -type LabelNamesResponse struct { - LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` -} - -func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } -func (*LabelNamesResponse) ProtoMessage() {} -func (*LabelNamesResponse) Descriptor() 
([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{10} -} -func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesResponse.Merge(m, src) -} -func (m *LabelNamesResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo - -func (m *LabelNamesResponse) GetLabelNames() []string { - if m != nil { - return m.LabelNames - } - return nil -} - -type UserStatsRequest struct { -} - -func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } -func (*UserStatsRequest) ProtoMessage() {} -func (*UserStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{11} -} -func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserStatsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserStatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsRequest.Merge(m, src) -} -func (m *UserStatsRequest) XXX_Size() int { - return m.Size() -} -func (m *UserStatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UserStatsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UserStatsRequest proto.InternalMessageInfo - -type UserStatsResponse struct { - IngestionRate 
float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate,proto3" json:"ingestion_rate,omitempty"` - NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries,proto3" json:"num_series,omitempty"` - ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` - RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` -} - -func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } -func (*UserStatsResponse) ProtoMessage() {} -func (*UserStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{12} -} -func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsResponse.Merge(m, src) -} -func (m *UserStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UserStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UserStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UserStatsResponse proto.InternalMessageInfo - -func (m *UserStatsResponse) GetIngestionRate() float64 { - if m != nil { - return m.IngestionRate - } - return 0 -} - -func (m *UserStatsResponse) GetNumSeries() uint64 { - if m != nil { - return m.NumSeries - } - return 0 -} - -func (m *UserStatsResponse) GetApiIngestionRate() float64 { - if m != nil { - return m.ApiIngestionRate - } - return 0 -} - -func (m *UserStatsResponse) GetRuleIngestionRate() float64 { - if m != nil { - return m.RuleIngestionRate - } - return 0 -} - -type UserIDStatsResponse 
struct { - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } -func (*UserIDStatsResponse) ProtoMessage() {} -func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{13} -} -func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserIDStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserIDStatsResponse.Merge(m, src) -} -func (m *UserIDStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UserIDStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UserIDStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UserIDStatsResponse proto.InternalMessageInfo - -func (m *UserIDStatsResponse) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *UserIDStatsResponse) GetData() *UserStatsResponse { - if m != nil { - return m.Data - } - return nil -} - -type UsersStatsResponse struct { - Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } -func (*UsersStatsResponse) ProtoMessage() {} -func (*UsersStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{14} -} -func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_UsersStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UsersStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsersStatsResponse.Merge(m, src) -} -func (m *UsersStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UsersStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UsersStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UsersStatsResponse proto.InternalMessageInfo - -func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse { - if m != nil { - return m.Stats - } - return nil -} - -type MetricsForLabelMatchersRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` -} - -func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } -func (*MetricsForLabelMatchersRequest) ProtoMessage() {} -func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{15} -} -func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src) -} -func (m 
*MetricsForLabelMatchersRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsForLabelMatchersRequest proto.InternalMessageInfo - -func (m *MetricsForLabelMatchersRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *MetricsForLabelMatchersRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { - if m != nil { - return m.MatchersSet - } - return nil -} - -type MetricsForLabelMatchersResponse struct { - Metric []*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` -} - -func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} } -func (*MetricsForLabelMatchersResponse) ProtoMessage() {} -func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{16} -} -func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src) -} -func (m *MetricsForLabelMatchersResponse) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsForLabelMatchersResponse proto.InternalMessageInfo - -func (m 
*MetricsForLabelMatchersResponse) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -type TimeSeriesChunk struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Labels []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` - Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` -} - -func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } -func (*TimeSeriesChunk) ProtoMessage() {} -func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{17} -} -func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesChunk.Merge(m, src) -} -func (m *TimeSeriesChunk) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesChunk) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo - -func (m *TimeSeriesChunk) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesChunk) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesChunk) GetChunks() []Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -type Chunk struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - 
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Encoding int32 `protobuf:"varint,3,opt,name=encoding,proto3" json:"encoding,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *Chunk) Reset() { *m = Chunk{} } -func (*Chunk) ProtoMessage() {} -func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{18} -} -func (m *Chunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Chunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chunk.Merge(m, src) -} -func (m *Chunk) XXX_Size() int { - return m.Size() -} -func (m *Chunk) XXX_DiscardUnknown() { - xxx_messageInfo_Chunk.DiscardUnknown(m) -} - -var xxx_messageInfo_Chunk proto.InternalMessageInfo - -func (m *Chunk) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *Chunk) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *Chunk) GetEncoding() int32 { - if m != nil { - return m.Encoding - } - return 0 -} - -func (m *Chunk) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type TransferChunksResponse struct { -} - -func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } -func (*TransferChunksResponse) ProtoMessage() {} -func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{19} -} -func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
if deterministic { - return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransferChunksResponse.Merge(m, src) -} -func (m *TransferChunksResponse) XXX_Size() int { - return m.Size() -} -func (m *TransferChunksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo - -type TimeSeries struct { - Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` - // Sorted by time, oldest sample first. - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{20} -} -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return m.Size() -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetSamples() []Sample { - if m != nil { - return m.Samples - } - return nil -} - -type LabelPair struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value []byte 
`protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{21} -} -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return m.Size() -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *LabelPair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{22} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m 
*Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Sample) GetTimestampMs() int64 { - if m != nil { - return m.TimestampMs - } - return 0 -} - -type LabelMatchers struct { - Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } -func (*LabelMatchers) ProtoMessage() {} -func (*LabelMatchers) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{23} -} -func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatchers) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatchers.Merge(m, src) -} -func (m *LabelMatchers) XXX_Size() int { - return m.Size() -} -func (m *LabelMatchers) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatchers.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatchers proto.InternalMessageInfo - -func (m *LabelMatchers) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -type Metric struct { - Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{24} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -type LabelMatcher struct { - Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{25} -} -func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatcher) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatcher.Merge(m, src) -} -func (m *LabelMatcher) XXX_Size() int { - return m.Size() -} -func (m *LabelMatcher) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatcher.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo - -func (m *LabelMatcher) GetType() MatchType { - if m != nil { - return m.Type - } - return EQUAL -} - -func (m *LabelMatcher) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LabelMatcher) GetValue() string { - if m != nil { - return m.Value 
- } - return "" -} - -type TimeSeriesFile struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } -func (*TimeSeriesFile) ProtoMessage() {} -func (*TimeSeriesFile) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{26} -} -func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesFile) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesFile.Merge(m, src) -} -func (m *TimeSeriesFile) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesFile) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesFile.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesFile proto.InternalMessageInfo - -func (m *TimeSeriesFile) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesFile) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesFile) GetFilename() string { - if m != nil { - return m.Filename - } - return "" -} - -func (m *TimeSeriesFile) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type TransferTSDBResponse struct { -} - -func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } -func (*TransferTSDBResponse) ProtoMessage() {} -func 
(*TransferTSDBResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{27} -} -func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransferTSDBResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TransferTSDBResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TransferTSDBResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransferTSDBResponse.Merge(m, src) -} -func (m *TransferTSDBResponse) XXX_Size() int { - return m.Size() -} -func (m *TransferTSDBResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TransferTSDBResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TransferTSDBResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("cortex.MatchType1.2", MatchType_name, MatchType_value) - proto.RegisterEnum("cortex.WriteRequest_SourceEnum1.2", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) - proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest1.2") - proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse1.2") - proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest1.2") - proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse1.2") - proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest1.2") - proto.RegisterType((*QueryResponse)(nil), "cortex.QueryResponse1.2") - proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse1.2") - proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest1.2") - proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse1.2") - proto.RegisterType((*LabelNamesRequest)(nil), "cortex.LabelNamesRequest1.2") - proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse1.2") - proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest1.2") - 
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse1.2") - proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse1.2") - proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse1.2") - proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest1.2") - proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse1.2") - proto.RegisterType((*TimeSeriesChunk)(nil), "cortex.TimeSeriesChunk1.2") - proto.RegisterType((*Chunk)(nil), "cortex.Chunk1.2") - proto.RegisterType((*TransferChunksResponse)(nil), "cortex.TransferChunksResponse1.2") - proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries1.2") - proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair1.2") - proto.RegisterType((*Sample)(nil), "cortex.Sample1.2") - proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers1.2") - proto.RegisterType((*Metric)(nil), "cortex.Metric1.2") - proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher1.2") - proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile1.2") - proto.RegisterType((*TransferTSDBResponse)(nil), "cortex.TransferTSDBResponse1.2") -} - -func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } - -var fileDescriptor_893a47d0a749d749 = []byte{ - // 1266 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xdf, 0x8d, 0x5f, 0x12, 0x3f, 0xde, 0xb8, 0xce, 0x24, 0x6d, 0xd3, 0xed, 0xff, 0xbf, 0x29, - 0x23, 0xb5, 0x44, 0x40, 0xdd, 0x92, 0xaa, 0xd0, 0x03, 0x55, 0xe5, 0xb4, 0x69, 0x6b, 0x94, 0xa4, - 0xe9, 0xd8, 0x05, 0x84, 0x84, 0xac, 0x8d, 0x3d, 0x49, 0x56, 0xec, 0x8b, 0xbb, 0x33, 0x8b, 0xe8, - 0x01, 0x09, 0x89, 0x0f, 0x00, 0x47, 0x3e, 0x02, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8f, 0x15, - 0x87, 0x8a, 0x3a, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xee, 0x5a, 0x44, 0x40, - 0x6f, 
0x3b, 0xcf, 0xf3, 0x9b, 0xdf, 0x3c, 0xaf, 0x33, 0xcf, 0x82, 0x31, 0x08, 0x42, 0x4e, 0xbf, - 0x6e, 0x8d, 0xc2, 0x80, 0x07, 0xa8, 0x2a, 0x57, 0xe6, 0xe5, 0x43, 0x87, 0x1f, 0x45, 0xfb, 0xad, - 0x41, 0xe0, 0x5d, 0x39, 0x0c, 0x0e, 0x83, 0x2b, 0x42, 0xbd, 0x1f, 0x1d, 0x88, 0x95, 0x58, 0x88, - 0x2f, 0xb9, 0x0d, 0xff, 0xaa, 0x83, 0xf1, 0x69, 0xe8, 0x70, 0x4a, 0xe8, 0xe3, 0x88, 0x32, 0x8e, - 0x76, 0x01, 0xb8, 0xe3, 0x51, 0x46, 0x43, 0x87, 0xb2, 0x55, 0xfd, 0x42, 0x69, 0xbd, 0xbe, 0x81, - 0x5a, 0xea, 0xa8, 0x9e, 0xe3, 0xd1, 0xae, 0xd0, 0x6c, 0x9a, 0x4f, 0x5f, 0xac, 0x69, 0xbf, 0xbf, - 0x58, 0x43, 0x7b, 0x21, 0xb5, 0x5d, 0x37, 0x18, 0xf4, 0xd2, 0x5d, 0x24, 0xc3, 0x80, 0x3e, 0x84, - 0x6a, 0x37, 0x88, 0xc2, 0x01, 0x5d, 0x9d, 0xbb, 0xa0, 0xaf, 0x37, 0x36, 0xd6, 0x12, 0xae, 0xec, - 0xa9, 0x2d, 0x09, 0xd9, 0xf2, 0x23, 0x8f, 0x54, 0x99, 0xf8, 0xc6, 0x6b, 0x00, 0x13, 0x29, 0x9a, - 0x87, 0x52, 0x7b, 0xaf, 0xd3, 0xd4, 0xd0, 0x02, 0x94, 0xc9, 0xa3, 0xed, 0xad, 0xa6, 0x8e, 0x4f, - 0xc1, 0xa2, 0xe2, 0x60, 0xa3, 0xc0, 0x67, 0x14, 0xdf, 0x84, 0x3a, 0xa1, 0xf6, 0x30, 0xf1, 0xa4, - 0x05, 0xf3, 0x8f, 0xa3, 0xac, 0x1b, 0x2b, 0xc9, 0xd1, 0x0f, 0x23, 0x1a, 0x3e, 0x51, 0x30, 0x92, - 0x80, 0xf0, 0x2d, 0x30, 0xe4, 0x76, 0x49, 0x87, 0xae, 0xc0, 0x7c, 0x48, 0x59, 0xe4, 0xf2, 0x64, - 0xff, 0xe9, 0xa9, 0xfd, 0x12, 0x47, 0x12, 0x14, 0xfe, 0x51, 0x07, 0x23, 0x4b, 0x8d, 0xde, 0x03, - 0xc4, 0xb8, 0x1d, 0xf2, 0xbe, 0x88, 0x07, 0xb7, 0xbd, 0x51, 0xdf, 0x8b, 0xc9, 0xf4, 0xf5, 0x12, - 0x69, 0x0a, 0x4d, 0x2f, 0x51, 0xec, 0x30, 0xb4, 0x0e, 0x4d, 0xea, 0x0f, 0xf3, 0xd8, 0x39, 0x81, - 0x6d, 0x50, 0x7f, 0x98, 0x45, 0x5e, 0x85, 0x05, 0xcf, 0xe6, 0x83, 0x23, 0x1a, 0xb2, 0xd5, 0x52, - 0xde, 0xb5, 0x6d, 0x7b, 0x9f, 0xba, 0x3b, 0x52, 0x49, 0x52, 0x14, 0xee, 0xc0, 0x62, 0xce, 0x68, - 0x74, 0xe3, 0x84, 0x69, 0x2e, 0xc7, 0x69, 0xce, 0x26, 0x14, 0xf7, 0x60, 0x59, 0x50, 0x75, 0x79, - 0x48, 0x6d, 0x2f, 0x25, 0xbc, 0x59, 0x40, 0x78, 0xf6, 0x75, 0xc2, 0xdb, 0x47, 0x91, 0xff, 0x65, - 0x01, 0xeb, 0x35, 0x40, 0xc2, 0xf4, 0x4f, 
0x6c, 0x37, 0xa2, 0x2c, 0x09, 0xe0, 0xff, 0x01, 0xdc, - 0x58, 0xda, 0xf7, 0x6d, 0x8f, 0x8a, 0xc0, 0xd5, 0x48, 0x4d, 0x48, 0x76, 0x6d, 0x8f, 0xe2, 0x1b, - 0xb0, 0x9c, 0xdb, 0xa4, 0x4c, 0x79, 0x0b, 0x0c, 0xb9, 0xeb, 0x2b, 0x21, 0x17, 0xc6, 0xd4, 0x48, - 0xdd, 0x9d, 0x40, 0xf1, 0x32, 0x2c, 0x6d, 0x27, 0x34, 0xc9, 0x69, 0xf8, 0xba, 0xb2, 0x41, 0x09, - 0x15, 0xdb, 0x1a, 0xd4, 0x27, 0x36, 0x24, 0x64, 0x90, 0x1a, 0xc1, 0x30, 0x82, 0xe6, 0x23, 0x46, - 0xc3, 0x2e, 0xb7, 0x79, 0x4a, 0xf5, 0x8b, 0x0e, 0x4b, 0x19, 0xa1, 0xa2, 0xba, 0x08, 0x0d, 0xc7, - 0x3f, 0xa4, 0x8c, 0x3b, 0x81, 0xdf, 0x0f, 0x6d, 0x2e, 0x5d, 0xd2, 0xc9, 0x62, 0x2a, 0x25, 0x36, - 0xa7, 0xb1, 0xd7, 0x7e, 0xe4, 0xf5, 0x55, 0x28, 0xe3, 0x12, 0x28, 0x93, 0x9a, 0x1f, 0x79, 0x32, - 0x82, 0x71, 0x55, 0xd9, 0x23, 0xa7, 0x3f, 0xc5, 0x54, 0x12, 0x4c, 0x4d, 0x7b, 0xe4, 0x74, 0x72, - 0x64, 0x2d, 0x58, 0x0e, 0x23, 0x97, 0x4e, 0xc3, 0xcb, 0x02, 0xbe, 0x14, 0xab, 0x72, 0x78, 0xfc, - 0x05, 0x2c, 0xc7, 0x86, 0x77, 0xee, 0xe4, 0x4d, 0x3f, 0x0b, 0xf3, 0x11, 0xa3, 0x61, 0xdf, 0x19, - 0xaa, 0x34, 0x54, 0xe3, 0x65, 0x67, 0x88, 0x2e, 0x43, 0x79, 0x68, 0x73, 0x5b, 0x98, 0x59, 0xdf, - 0x38, 0x97, 0x64, 0xfc, 0x35, 0xe7, 0x89, 0x80, 0xe1, 0x7b, 0x80, 0x62, 0x15, 0xcb, 0xb3, 0xbf, - 0x0f, 0x15, 0x16, 0x0b, 0x54, 0xdd, 0x9c, 0xcf, 0xb2, 0x4c, 0x59, 0x42, 0x24, 0x12, 0xff, 0xac, - 0x83, 0xb5, 0x43, 0x79, 0xe8, 0x0c, 0xd8, 0xdd, 0x20, 0xcc, 0x96, 0x3d, 0x7b, 0xd3, 0xed, 0x77, - 0x03, 0x8c, 0xa4, 0xb1, 0xfa, 0x8c, 0x72, 0xd5, 0x82, 0xa7, 0x8b, 0x5a, 0x90, 0x91, 0x7a, 0x02, - 0xed, 0x52, 0x8e, 0x3b, 0xb0, 0x36, 0xd3, 0x66, 0x15, 0x8a, 0x4b, 0x50, 0xf5, 0x04, 0x44, 0xc5, - 0xa2, 0x91, 0xd0, 0xca, 0x8d, 0x44, 0x69, 0xf1, 0x6f, 0x3a, 0x9c, 0x9a, 0x6a, 0xab, 0xd8, 0x85, - 0x83, 0x30, 0xf0, 0x54, 0xae, 0xb3, 0xd9, 0x6a, 0xc4, 0xf2, 0x8e, 0x12, 0x77, 0x86, 0xd9, 0x74, - 0xce, 0xe5, 0xd2, 0x79, 0x0b, 0xaa, 0xa2, 0xb4, 0x93, 0x8b, 0x65, 0x29, 0xe7, 0xd5, 0x9e, 0xed, - 0x84, 0x9b, 0x2b, 0xea, 0xe6, 0x37, 0x84, 0xa8, 0x3d, 0xb4, 0x47, 0x9c, 0x86, 
0x44, 0x6d, 0x43, - 0xef, 0x42, 0x75, 0x10, 0x1b, 0xc3, 0x56, 0xcb, 0x82, 0x60, 0x31, 0x21, 0xc8, 0x76, 0xbe, 0x82, - 0xe0, 0xef, 0x75, 0xa8, 0x48, 0xd3, 0xdf, 0x54, 0xae, 0x4c, 0x58, 0xa0, 0xfe, 0x20, 0x18, 0x3a, - 0xfe, 0xa1, 0x68, 0x91, 0x0a, 0x49, 0xd7, 0x08, 0xa9, 0xd2, 0x8d, 0x7b, 0xc1, 0x50, 0xf5, 0xb9, - 0x0a, 0x67, 0x7a, 0xa1, 0xed, 0xb3, 0x03, 0x1a, 0x0a, 0xc3, 0xd2, 0xc4, 0xe0, 0x6f, 0x00, 0x26, - 0xf1, 0xce, 0xc4, 0x49, 0xff, 0x67, 0x71, 0x6a, 0xc1, 0x3c, 0xb3, 0xbd, 0x91, 0x2b, 0x3a, 0x3c, - 0x97, 0xe8, 0xae, 0x10, 0xab, 0x48, 0x25, 0x20, 0x7c, 0x1d, 0x6a, 0x29, 0x75, 0x6c, 0x79, 0x7a, - 0x23, 0x1a, 0x44, 0x7c, 0xa3, 0x15, 0xa8, 0x88, 0xfb, 0x4e, 0x04, 0xc2, 0x20, 0x72, 0x81, 0xdb, - 0x50, 0x95, 0x7c, 0x13, 0xbd, 0xbc, 0x73, 0xe4, 0x22, 0xbe, 0x2b, 0x0b, 0xa2, 0x58, 0xe7, 0x93, - 0x10, 0xe2, 0x36, 0x2c, 0xe6, 0x4a, 0x35, 0xf7, 0xfc, 0xe8, 0x27, 0x7c, 0x7e, 0xaa, 0xb2, 0x7c, - 0xff, 0x75, 0xdc, 0x70, 0x1f, 0x8c, 0xec, 0x21, 0xe8, 0x22, 0x94, 0xf9, 0x93, 0x91, 0xf4, 0xaa, - 0x31, 0xa1, 0x13, 0xea, 0xde, 0x93, 0x11, 0x25, 0x42, 0x9d, 0x46, 0x4c, 0x56, 0xfb, 0x54, 0xc4, - 0x4a, 0x42, 0xa8, 0x22, 0xf6, 0x9d, 0x0e, 0x8d, 0x49, 0xa2, 0xef, 0x3a, 0x2e, 0xfd, 0x2f, 0xfa, - 0xca, 0x84, 0x85, 0x03, 0xc7, 0xa5, 0xc2, 0x06, 0x79, 0x5c, 0xba, 0x2e, 0xac, 0xc3, 0x33, 0xb0, - 0x92, 0xd4, 0x61, 0xaf, 0x7b, 0x67, 0x33, 0xa9, 0xc2, 0x77, 0x3e, 0x86, 0x5a, 0xea, 0x1a, 0xaa, - 0x41, 0x65, 0xeb, 0xe1, 0xa3, 0xf6, 0x76, 0x53, 0x43, 0x8b, 0x50, 0xdb, 0x7d, 0xd0, 0xeb, 0xcb, - 0xa5, 0x8e, 0x4e, 0x41, 0x9d, 0x6c, 0xdd, 0xdb, 0xfa, 0xac, 0xbf, 0xd3, 0xee, 0xdd, 0xbe, 0xdf, - 0x9c, 0x43, 0x08, 0x1a, 0x52, 0xb0, 0xfb, 0x40, 0xc9, 0x4a, 0x1b, 0xc7, 0x15, 0x58, 0x48, 0x6c, - 0x47, 0xd7, 0xa1, 0xbc, 0x17, 0xb1, 0x23, 0xb4, 0x52, 0x34, 0x9f, 0x99, 0xa7, 0xa7, 0xa4, 0xaa, - 0x27, 0x34, 0xf4, 0x01, 0x54, 0xc4, 0x34, 0x80, 0x0a, 0x87, 0x2b, 0xb3, 0x78, 0x64, 0xc2, 0x1a, - 0xba, 0x03, 0xf5, 0xcc, 0x14, 0x31, 0x63, 0xf7, 0xf9, 0x9c, 0x34, 0x3f, 0x70, 0x60, 0xed, 0xaa, - 0x8e, 0xee, 0x43, 
0x3d, 0x33, 0x00, 0x20, 0x33, 0x57, 0x4c, 0xb9, 0x51, 0x62, 0xc2, 0x55, 0x30, - 0x31, 0x60, 0x0d, 0x6d, 0x01, 0x4c, 0xde, 0x7e, 0x74, 0x2e, 0x07, 0xce, 0x0e, 0x09, 0xa6, 0x59, - 0xa4, 0x4a, 0x69, 0x36, 0xa1, 0x96, 0xbe, 0x7c, 0x68, 0xb5, 0xe0, 0x31, 0x94, 0x24, 0xb3, 0x9f, - 0x49, 0xac, 0xa1, 0xbb, 0x60, 0xb4, 0x5d, 0xf7, 0x24, 0x34, 0x66, 0x56, 0xc3, 0xa6, 0x79, 0x5c, - 0x38, 0x3b, 0xe3, 0xb1, 0x41, 0x97, 0xf2, 0x8f, 0xca, 0xac, 0x17, 0xd4, 0x7c, 0xfb, 0x6f, 0x71, - 0xe9, 0x69, 0x3b, 0xd0, 0xc8, 0x5f, 0x9c, 0x68, 0xd6, 0xf4, 0x67, 0x5a, 0xa9, 0xa2, 0xf8, 0xa6, - 0xd5, 0xd6, 0xe3, 0xcc, 0x1a, 0xd9, 0xfa, 0x47, 0x67, 0x5e, 0x27, 0x8b, 0x5b, 0xd3, 0xfc, 0xdf, - 0x34, 0x57, 0xb6, 0x5b, 0x62, 0xa6, 0xcd, 0x8f, 0x9e, 0xbd, 0xb4, 0xb4, 0xe7, 0x2f, 0x2d, 0xed, - 0xd5, 0x4b, 0x4b, 0xff, 0x76, 0x6c, 0xe9, 0x3f, 0x8d, 0x2d, 0xfd, 0xe9, 0xd8, 0xd2, 0x9f, 0x8d, - 0x2d, 0xfd, 0x8f, 0xb1, 0xa5, 0xff, 0x39, 0xb6, 0xb4, 0x57, 0x63, 0x4b, 0xff, 0xe1, 0xd8, 0xd2, - 0x9e, 0x1d, 0x5b, 0xda, 0xf3, 0x63, 0x4b, 0xfb, 0xbc, 0x3a, 0x70, 0x1d, 0xea, 0xf3, 0xfd, 0xaa, - 0xf8, 0x4d, 0xba, 0xf6, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x33, 0x55, 0x6d, 0x0d, - 0x00, 0x00, -} - -func (x MatchType) String() string { - s, ok := MatchType_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (x WriteRequest_SourceEnum) String() string { - s, ok := WriteRequest_SourceEnum_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *WriteRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteRequest) - if !ok { - that2, ok := that.(WriteRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(that1.Timeseries[i]) { - return false - } - } - if 
this.Source != that1.Source { - return false - } - return true -} -func (this *WriteResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteResponse) - if !ok { - that2, ok := that.(WriteResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *ReadRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadRequest) - if !ok { - that2, ok := that.(ReadRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Queries) != len(that1.Queries) { - return false - } - for i := range this.Queries { - if !this.Queries[i].Equal(that1.Queries[i]) { - return false - } - } - return true -} -func (this *ReadResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadResponse) - if !ok { - that2, ok := that.(ReadResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Results) != len(that1.Results) { - return false - } - for i := range this.Results { - if !this.Results[i].Equal(that1.Results[i]) { - return false - } - } - return true -} -func (this *QueryRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryRequest) - if !ok { - that2, ok := that.(QueryRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } 
- for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *QueryResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryResponse) - if !ok { - that2, ok := that.(QueryResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this *QueryStreamResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryStreamResponse) - if !ok { - that2, ok := that.(QueryStreamResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this *LabelValuesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesRequest) - if !ok { - that2, ok := that.(LabelValuesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.LabelName != that1.LabelName { - return false - } - return true -} -func (this *LabelValuesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesResponse) - if !ok { - that2, ok := that.(LabelValuesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } 
- if len(this.LabelValues) != len(that1.LabelValues) { - return false - } - for i := range this.LabelValues { - if this.LabelValues[i] != that1.LabelValues[i] { - return false - } - } - return true -} -func (this *LabelNamesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelNamesRequest) - if !ok { - that2, ok := that.(LabelNamesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *LabelNamesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelNamesResponse) - if !ok { - that2, ok := that.(LabelNamesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelNames) != len(that1.LabelNames) { - return false - } - for i := range this.LabelNames { - if this.LabelNames[i] != that1.LabelNames[i] { - return false - } - } - return true -} -func (this *UserStatsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserStatsRequest) - if !ok { - that2, ok := that.(UserStatsRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *UserStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserStatsResponse) - if !ok { - that2, ok := that.(UserStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.IngestionRate != that1.IngestionRate { - return false - } - if this.NumSeries != that1.NumSeries { - return false - } - if this.ApiIngestionRate != 
that1.ApiIngestionRate { - return false - } - if this.RuleIngestionRate != that1.RuleIngestionRate { - return false - } - return true -} -func (this *UserIDStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserIDStatsResponse) - if !ok { - that2, ok := that.(UserIDStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.UserId != that1.UserId { - return false - } - if !this.Data.Equal(that1.Data) { - return false - } - return true -} -func (this *UsersStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UsersStatsResponse) - if !ok { - that2, ok := that.(UsersStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Stats) != len(that1.Stats) { - return false - } - for i := range this.Stats { - if !this.Stats[i].Equal(that1.Stats[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsForLabelMatchersRequest) - if !ok { - that2, ok := that.(MetricsForLabelMatchersRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.MatchersSet) != len(that1.MatchersSet) { - return false - } - for i := range this.MatchersSet { - if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - 
} - - that1, ok := that.(*MetricsForLabelMatchersResponse) - if !ok { - that2, ok := that.(MetricsForLabelMatchersResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Metric) != len(that1.Metric) { - return false - } - for i := range this.Metric { - if !this.Metric[i].Equal(that1.Metric[i]) { - return false - } - } - return true -} -func (this *TimeSeriesChunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesChunk) - if !ok { - that2, ok := that.(TimeSeriesChunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { - return false - } - } - return true -} -func (this *Chunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Chunk) - if !ok { - that2, ok := that.(Chunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if this.Encoding != that1.Encoding { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *TransferChunksResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := 
that.(*TransferChunksResponse) - if !ok { - that2, ok := that.(TransferChunksResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *TimeSeries) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeries) - if !ok { - that2, ok := that.(TimeSeries) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Samples) != len(that1.Samples) { - return false - } - for i := range this.Samples { - if !this.Samples[i].Equal(&that1.Samples[i]) { - return false - } - } - return true -} -func (this *LabelPair) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelPair) - if !ok { - that2, ok := that.(LabelPair) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Name, that1.Name) { - return false - } - if !bytes.Equal(this.Value, that1.Value) { - return false - } - return true -} -func (this *Sample) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Sample) - if !ok { - that2, ok := that.(Sample) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Value != that1.Value { - return false - } - if this.TimestampMs != that1.TimestampMs { - return false - } - return true -} -func (this *LabelMatchers) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelMatchers) - if !ok { - 
that2, ok := that.(LabelMatchers) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *Metric) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Metric) - if !ok { - that2, ok := that.(Metric) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - return true -} -func (this *LabelMatcher) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelMatcher) - if !ok { - that2, ok := that.(LabelMatcher) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if this.Name != that1.Name { - return false - } - if this.Value != that1.Value { - return false - } - return true -} -func (this *TimeSeriesFile) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesFile) - if !ok { - that2, ok := that.(TimeSeriesFile) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if this.Filename != that1.Filename { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this 
*TransferTSDBResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TransferTSDBResponse) - if !ok { - that2, ok := that.(TransferTSDBResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *WriteRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.WriteRequest{") - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") - s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WriteResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.WriteResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ReadRequest{") - if this.Queries != nil { - s = append(s, "Queries: "+fmt.Sprintf("%#v", this.Queries)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ReadResponse{") - if this.Results != nil { - s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.QueryRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, 
"") -} -func (this *QueryResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.QueryResponse{") - if this.Timeseries != nil { - vs := make([]*TimeSeries, len(this.Timeseries)) - for i := range vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryStreamResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.QueryStreamResponse{") - if this.Timeseries != nil { - vs := make([]*TimeSeriesChunk, len(this.Timeseries)) - for i := range vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesRequest{") - s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesResponse{") - s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.LabelNamesRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelNamesResponse{") - s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsRequest) GoString() string { - if 
this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.UserStatsRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.UserStatsResponse{") - s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") - s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") - s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") - s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserIDStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.UserIDStatsResponse{") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - if this.Data != nil { - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UsersStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.UsersStatsResponse{") - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.MetricsForLabelMatchersRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.MatchersSet != nil { - s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersResponse) GoString() string { - if this == nil { - 
return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsForLabelMatchersResponse{") - if this.Metric != nil { - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeriesChunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.TimeSeriesChunk{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Chunks != nil { - vs := make([]*Chunk, len(this.Chunks)) - for i := range vs { - vs[i] = &this.Chunks[i] - } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Chunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.Chunk{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TransferChunksResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.TransferChunksResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeries) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.TimeSeries{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Samples != nil { - vs := make([]*Sample, len(this.Samples)) - for i := range vs { - vs[i] = &this.Samples[i] - } - s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, 
"}") - return strings.Join(s, "") -} -func (this *LabelPair) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.LabelPair{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Sample) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.Sample{") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatchers) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelMatchers{") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Metric) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.Metric{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatcher) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.LabelMatcher{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeriesFile) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.TimeSeriesFile{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Filename: 
"+fmt.Sprintf("%#v", this.Filename)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TransferTSDBResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.TransferTSDBResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringCortex(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IngesterClient is the client API for Ingester service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type IngesterClient interface { - Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) - Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) - QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) - LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) - LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) - UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) - AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) - MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). - TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) - // TransferTSDB transfers all files of a tsdb to a joining ingester - TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) -} - -type ingesterClient struct { - cc *grpc.ClientConn -} - -func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { - return &ingesterClient{cc} -} - -func (c *ingesterClient) Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { - out := new(WriteResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { - out := new(QueryResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Query", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) - if err != nil { - return nil, err - } - x := &ingesterQueryStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_QueryStreamClient interface { - Recv() (*QueryStreamResponse, error) - grpc.ClientStream -} - -type ingesterQueryStreamClient struct { - grpc.ClientStream -} - -func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { - m := new(QueryStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { - out := new(LabelValuesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { - out := new(LabelNamesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { - out := new(UserStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { - out := new(UsersStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { - out := new(MetricsForLabelMatchersResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/TransferChunks", opts...) - if err != nil { - return nil, err - } - x := &ingesterTransferChunksClient{stream} - return x, nil -} - -type Ingester_TransferChunksClient interface { - Send(*TimeSeriesChunk) error - CloseAndRecv() (*TransferChunksResponse, error) - grpc.ClientStream -} - -type ingesterTransferChunksClient struct { - grpc.ClientStream -} - -func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error { - return x.ClientStream.SendMsg(m) -} - -func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(TransferChunksResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/TransferTSDB", opts...) 
- if err != nil { - return nil, err - } - x := &ingesterTransferTSDBClient{stream} - return x, nil -} - -type Ingester_TransferTSDBClient interface { - Send(*TimeSeriesFile) error - CloseAndRecv() (*TransferTSDBResponse, error) - grpc.ClientStream -} - -type ingesterTransferTSDBClient struct { - grpc.ClientStream -} - -func (x *ingesterTransferTSDBClient) Send(m *TimeSeriesFile) error { - return x.ClientStream.SendMsg(m) -} - -func (x *ingesterTransferTSDBClient) CloseAndRecv() (*TransferTSDBResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(TransferTSDBResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// IngesterServer is the server API for Ingester service. -type IngesterServer interface { - Push(context.Context, *WriteRequest) (*WriteResponse, error) - Query(context.Context, *QueryRequest) (*QueryResponse, error) - QueryStream(*QueryRequest, Ingester_QueryStreamServer) error - LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) - LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) - UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) - AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) - MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
- TransferChunks(Ingester_TransferChunksServer) error - // TransferTSDB transfers all files of a tsdb to a joining ingester - TransferTSDB(Ingester_TransferTSDBServer) error -} - -func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { - s.RegisterService(&_Ingester_serviceDesc, srv) -} - -func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).Push(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Push", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Push(ctx, req.(*WriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).Query(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Query", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Query(ctx, req.(*QueryRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(QueryRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) -} - -type Ingester_QueryStreamServer interface { - Send(*QueryStreamResponse) error - grpc.ServerStream -} - -type ingesterQueryStreamServer struct { - grpc.ServerStream -} - -func (x *ingesterQueryStreamServer) Send(m 
*QueryStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelValuesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelValues(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelValues", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelNames(ctx, req.(*LabelNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).UserStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/UserStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).AllUserStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/AllUserStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsForLabelMatchersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) -} - -type Ingester_TransferChunksServer interface { - SendAndClose(*TransferChunksResponse) error - Recv() (*TimeSeriesChunk, error) - grpc.ServerStream -} - -type ingesterTransferChunksServer struct { - grpc.ServerStream -} - -func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { - m := new(TimeSeriesChunk) - if err := 
x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Ingester_TransferTSDB_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IngesterServer).TransferTSDB(&ingesterTransferTSDBServer{stream}) -} - -type Ingester_TransferTSDBServer interface { - SendAndClose(*TransferTSDBResponse) error - Recv() (*TimeSeriesFile, error) - grpc.ServerStream -} - -type ingesterTransferTSDBServer struct { - grpc.ServerStream -} - -func (x *ingesterTransferTSDBServer) SendAndClose(m *TransferTSDBResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *ingesterTransferTSDBServer) Recv() (*TimeSeriesFile, error) { - m := new(TimeSeriesFile) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Ingester_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cortex.Ingester", - HandlerType: (*IngesterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Push", - Handler: _Ingester_Push_Handler, - }, - { - MethodName: "Query", - Handler: _Ingester_Query_Handler, - }, - { - MethodName: "LabelValues", - Handler: _Ingester_LabelValues_Handler, - }, - { - MethodName: "LabelNames", - Handler: _Ingester_LabelNames_Handler, - }, - { - MethodName: "UserStats", - Handler: _Ingester_UserStats_Handler, - }, - { - MethodName: "AllUserStats", - Handler: _Ingester_AllUserStats_Handler, - }, - { - MethodName: "MetricsForLabelMatchers", - Handler: _Ingester_MetricsForLabelMatchers_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "QueryStream", - Handler: _Ingester_QueryStream_Handler, - ServerStreams: true, - }, - { - StreamName: "TransferChunks", - Handler: _Ingester_TransferChunks_Handler, - ClientStreams: true, - }, - { - StreamName: "TransferTSDB", - Handler: _Ingester_TransferTSDB_Handler, - ClientStreams: true, - }, - }, - Metadata: "cortex.proto", -} - -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, 
err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, msg := range m.Timeseries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Source != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Source)) - } - return i, nil -} - -func (m *WriteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *ReadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Queries) > 0 { - for _, msg := range m.Queries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ReadResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Results) > 0 { - for _, msg := range m.Results { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m 
*QueryRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.StartTimestampMs != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) - } - if len(m.Matchers) > 0 { - for _, msg := range m.Matchers { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *QueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, msg := range m.Timeseries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, msg := range m.Timeseries { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { - size 
:= m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.LabelName) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelName))) - i += copy(dAtA[i:], m.LabelName) - } - return i, nil -} - -func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.IngestionRate != 0 { - dAtA[i] = 0x9 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) - i += 8 - } - if m.NumSeries != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.NumSeries)) - } - if m.ApiIngestionRate != 0 { - dAtA[i] = 0x19 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) - i += 8 - } - if m.RuleIngestionRate != 0 { - dAtA[i] = 0x21 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) - i += 8 - } - return i, nil -} - -func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.UserId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } - if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Data.Size())) - n1, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - return i, nil -} - -func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.StartTimestampMs != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) - } - if len(m.MatchersSet) > 0 { - for _, msg := range m.MatchersSet { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Metric) > 0 { - for _, msg := range m.Metric { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.FromIngesterId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) - i += copy(dAtA[i:], m.FromIngesterId) - } - if len(m.UserId) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Chunks) > 0 { - for _, msg := range m.Chunks { - dAtA[i] = 0x22 - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Chunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.StartTimestampMs != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) - } - if m.Encoding != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Encoding)) - } - if len(m.Data) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil 
-} - -func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Samples) > 0 { - for _, msg := range m.Samples { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelPair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Value) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - return i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Value != 0 { - dAtA[i] = 0x9 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i += 8 - } - if m.TimestampMs != 0 { - dAtA[i] = 0x10 
- i++ - i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs)) - } - return i, nil -} - -func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for _, msg := range m.Matchers { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Labels) > 0 { - for _, msg := range m.Labels { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintCortex(dAtA, i, uint64(m.Type)) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Value) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - return i, nil -} - -func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - 
n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.FromIngesterId) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) - i += copy(dAtA[i:], m.FromIngesterId) - } - if len(m.UserId) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i += copy(dAtA[i:], m.UserId) - } - if len(m.Filename) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Filename))) - i += copy(dAtA[i:], m.Filename) - } - if len(m.Data) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransferTSDBResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *WriteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - if m.Source != 0 { - n += 1 + sovCortex(uint64(m.Source)) - } - return n -} - -func (m *WriteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ReadRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Queries) > 0 { - for _, e := range m.Queries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m 
*ReadResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, e := range m.Results { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *QueryRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.EndTimestampMs)) - } - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *QueryResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *QueryStreamResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelValuesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.LabelName) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *LabelValuesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { - l = len(s) - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelNamesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *LabelNamesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - l = len(s) - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *UserStatsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *UserStatsResponse) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IngestionRate != 0 { - n += 9 - } - if m.NumSeries != 0 { - n += 1 + sovCortex(uint64(m.NumSeries)) - } - if m.ApiIngestionRate != 0 { - n += 9 - } - if m.RuleIngestionRate != 0 { - n += 9 - } - return n -} - -func (m *UserIDStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - if m.Data != nil { - l = m.Data.Size() - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *UsersStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.EndTimestampMs)) - } - if len(m.MatchersSet) > 0 { - for _, e := range m.MatchersSet { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *TimeSeriesChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *Chunk) Size() (n int) { - if m == nil { - return 0 - } - 
var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.EndTimestampMs)) - } - if m.Encoding != 0 { - n += 1 + sovCortex(uint64(m.Encoding)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *TransferChunksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *TimeSeries) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelPair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *Sample) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != 0 { - n += 9 - } - if m.TimestampMs != 0 { - n += 1 + sovCortex(uint64(m.TimestampMs)) - } - return n -} - -func (m *LabelMatchers) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *Metric) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelMatcher) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovCortex(uint64(m.Type)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return 
n -} - -func (m *TimeSeriesFile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Filename) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *TransferTSDBResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovCortex(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozCortex(x uint64) (n int) { - return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *WriteRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteRequest{`, - `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, - `Source:` + fmt.Sprintf("%v", this.Source) + `,`, - `}`, - }, "") - return s -} -func (this *WriteResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteResponse{`, - `}`, - }, "") - return s -} -func (this *ReadRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadRequest{`, - `Queries:` + strings.Replace(fmt.Sprintf("%v", this.Queries), "QueryRequest", "QueryRequest", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReadResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadResponse{`, - `Results:` + strings.Replace(fmt.Sprintf("%v", this.Results), "QueryResponse", "QueryResponse", 1) + `,`, - `}`, - }, "") - return s -} -func (this *QueryRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QueryRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + 
strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, - `}`, - }, "") - return s -} -func (this *QueryResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QueryResponse{`, - `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *QueryStreamResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QueryStreamResponse{`, - `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesRequest{`, - `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesResponse{`, - `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesRequest{`, - `}`, - }, "") - return s -} -func (this *LabelNamesResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesResponse{`, - `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, - `}`, - }, "") - return s -} -func (this *UserStatsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserStatsRequest{`, - `}`, - }, "") - return s -} -func (this *UserStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserStatsResponse{`, - `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, - `NumSeries:` 
+ fmt.Sprintf("%v", this.NumSeries) + `,`, - `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, - `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, - `}`, - }, "") - return s -} -func (this *UserIDStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserIDStatsResponse{`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "UserStatsResponse", "UserStatsResponse", 1) + `,`, - `}`, - }, "") - return s -} -func (this *UsersStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UsersStatsResponse{`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "UserIDStatsResponse", "UserIDStatsResponse", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `MatchersSet:` + strings.Replace(fmt.Sprintf("%v", this.MatchersSet), "LabelMatchers", "LabelMatchers", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, - `Metric:` + strings.Replace(fmt.Sprintf("%v", this.Metric), "Metric", "Metric", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeriesChunk) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TimeSeriesChunk{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1), `&`, 
``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Chunk) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Chunk{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Encoding:` + fmt.Sprintf("%v", this.Encoding) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *TransferChunksResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransferChunksResponse{`, - `}`, - }, "") - return s -} -func (this *TimeSeries) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TimeSeries{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LabelPair) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelPair{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Sample) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sample{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, - `}`, - }, "") - return s -} -func (this *LabelMatchers) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelMatchers{`, - `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Metric) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Metric{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `}`, - }, "") - return s -} -func (this *LabelMatcher) String() string { - if this == nil { - return 
"nil" - } - s := strings.Join([]string{`&LabelMatcher{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeriesFile) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TimeSeriesFile{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *TransferTSDBResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransferTSDBResponse{`, - `}`, - }, "") - return s -} -func valueToStringCortex(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *WriteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, PreallocTimeseries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - m.Source = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queries = append(m.Queries, &QueryRequest{}) - if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, &QueryResponse{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := 
m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if 
err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, TimeSeriesChunk{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) 
> l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx 
< l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.IngestionRate = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) - } - m.NumSeries = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumSeries |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ApiIngestionRate = float64(math.Float64frombits(v)) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.RuleIngestionRate = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == 
nil { - m.Data = &UserStatsResponse{} - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, &UserIDStatsResponse{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", wireType) - } - var msglen int - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) - if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metric = append(m.Metric, &Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - 
postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Chunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Chunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) - } - m.Encoding = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Encoding |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Samples = append(m.Samples, Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelPair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) - } - m.TimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatchers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, LabelAdapter{}) - if err := 
m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatcher) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= MatchType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - 
} - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeriesFile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesFile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesFile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filename = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransferTSDBResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransferTSDBResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransferTSDBResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 
0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCortex(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCortex - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthCortex - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipCortex(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthCortex - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCortex = fmt.Errorf("proto: integer overflow") -) diff --git a/integration-tests/proto/1.2.1/timeseries.go b/integration-tests/proto/1.2.1/timeseries.go deleted file mode 100644 index 204382952b..0000000000 --- a/integration-tests/proto/1.2.1/timeseries.go +++ /dev/null @@ -1,256 +0,0 @@ -package client - -import ( - "flag" - "fmt" - "io" - "strings" - "sync" - "unsafe" - - "github.com/prometheus/prometheus/pkg/labels" -) - -var ( - expectedTimeseries = 100 - expectedLabels = 20 - expectedSamplesPerSeries = 10 - - slicePool = sync.Pool{ - New: func() interface{} { - return make([]PreallocTimeseries, 0, expectedTimeseries) - }, - } - - timeSeriesPool = sync.Pool{ - New: func() interface{} { - return &TimeSeries{ - Labels: make([]LabelAdapter, 0, expectedLabels), - Samples: make([]Sample, 0, expectedSamplesPerSeries), - } - }, - } -) - -// PreallocConfig configures how structures will be preallocated to optimise -// proto unmarshalling. -type PreallocConfig struct{} - -// RegisterFlags registers configuration settings. -func (PreallocConfig) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, use for preallocations.") - f.IntVar(&expectedLabels, "ingester-client.expected-labels", expectedLabels, "Expected number of labels per timeseries, used for preallocations.") - f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.") -} - -// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal. -type PreallocWriteRequest struct { - WriteRequest -} - -// Unmarshal implements proto.Message. 
-func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error { - p.Timeseries = slicePool.Get().([]PreallocTimeseries) - return p.WriteRequest.Unmarshal(dAtA) -} - -// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal. -type PreallocTimeseries struct { - *TimeSeries -} - -// Unmarshal implements proto.Message. -func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { - p.TimeSeries = timeSeriesPool.Get().(*TimeSeries) - return p.TimeSeries.Unmarshal(dAtA) -} - -// LabelAdapter is a labels.Label that can be marshalled to/from protos. -type LabelAdapter labels.Label - -// Marshal implements proto.Marshaller. -func (bs *LabelAdapter) Marshal() ([]byte, error) { - buf := make([]byte, bs.Size()) - _, err := bs.MarshalTo(buf) - return buf, err -} - -// MarshalTo implements proto.Marshaller. -func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) { - var i int - ls := (*labels.Label)(bs) - - buf[i] = 0xa - i++ - i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) - i += copy(buf[i:], ls.Name) - - buf[i] = 0x12 - i++ - i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) - i += copy(buf[i:], ls.Value) - - return i, nil -} - -// Unmarshal a LabelAdapater, implements proto.Unmarshaller. -// NB this is a copy of the autogenerated code to unmarshal a LabelPair, -// with the byte copying replaced with a yoloString. 
-func (bs *LabelAdapter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - bs.Name = yoloString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - bs.Value = yoloString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func yoloString(buf []byte) string { - return *((*string)(unsafe.Pointer(&buf))) -} - -// Size implements proto.Sizer. -func (bs *LabelAdapter) Size() int { - ls := (*labels.Label)(bs) - var n int - l := len(ls.Name) - n += 1 + l + sovCortex(uint64(l)) - l = len(ls.Value) - n += 1 + l + sovCortex(uint64(l)) - return n -} - -// Equal implements proto.Equaler. -func (bs *LabelAdapter) Equal(other LabelAdapter) bool { - return bs.Name == other.Name && bs.Value == other.Value -} - -// Compare implements proto.Comparer. -func (bs *LabelAdapter) Compare(other LabelAdapter) int { - if c := strings.Compare(bs.Name, other.Name); c != 0 { - return c - } - return strings.Compare(bs.Value, other.Value) -} - -// ReuseSlice puts the slice back into a sync.Pool for reuse. -func ReuseSlice(slice []PreallocTimeseries) { - for i := range slice { - ReuseTimeseries(slice[i].TimeSeries) - } - slicePool.Put(slice[:0]) -} - -// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. -func ReuseTimeseries(ts *TimeSeries) { - ts.Labels = ts.Labels[:0] - ts.Samples = ts.Samples[:0] - timeSeriesPool.Put(ts) -} diff --git a/integration-tests/proto/1.3.0/cortex.pb.go b/integration-tests/proto/1.3.0/cortex.pb.go deleted file mode 100644 index 4bd6772008..0000000000 --- a/integration-tests/proto/1.3.0/cortex.pb.go +++ /dev/null @@ -1,7967 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: cortex.proto - -package client - -import ( - bytes "bytes" - context "context" - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type MatchType int32 - -const ( - EQUAL MatchType = 0 - NOT_EQUAL MatchType = 1 - REGEX_MATCH MatchType = 2 - REGEX_NO_MATCH MatchType = 3 -) - -var MatchType_name = map[int32]string{ - 0: "EQUAL", - 1: "NOT_EQUAL", - 2: "REGEX_MATCH", - 3: "REGEX_NO_MATCH", -} - -var MatchType_value = map[string]int32{ - "EQUAL": 0, - "NOT_EQUAL": 1, - "REGEX_MATCH": 2, - "REGEX_NO_MATCH": 3, -} - -func (MatchType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0} -} - -type WriteRequest_SourceEnum int32 - -const ( - API WriteRequest_SourceEnum = 0 - RULE WriteRequest_SourceEnum = 1 -) - -var WriteRequest_SourceEnum_name = map[int32]string{ - 0: "API", - 1: "RULE", -} - -var WriteRequest_SourceEnum_value = map[string]int32{ - "API": 0, - "RULE": 1, -} - -func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0, 0} -} - -type WriteRequest struct { - Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` - Source 
WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` -} - -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0} -} -func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(m, src) -} -func (m *WriteRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteRequest proto.InternalMessageInfo - -func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { - if m != nil { - return m.Source - } - return API -} - -type WriteResponse struct { -} - -func (m *WriteResponse) Reset() { *m = WriteResponse{} } -func (*WriteResponse) ProtoMessage() {} -func (*WriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{1} -} -func (m *WriteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteResponse.Merge(m, src) -} -func (m *WriteResponse) XXX_Size() int { - return m.Size() -} -func (m 
*WriteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WriteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteResponse proto.InternalMessageInfo - -type ReadRequest struct { - Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` -} - -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{2} -} -func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(m, src) -} -func (m *ReadRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadRequest proto.InternalMessageInfo - -func (m *ReadRequest) GetQueries() []*QueryRequest { - if m != nil { - return m.Queries - } - return nil -} - -type ReadResponse struct { - Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` -} - -func (m *ReadResponse) Reset() { *m = ReadResponse{} } -func (*ReadResponse) ProtoMessage() {} -func (*ReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{3} -} -func (m *ReadResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - 
return b[:n], nil - } -} -func (m *ReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResponse.Merge(m, src) -} -func (m *ReadResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResponse proto.InternalMessageInfo - -func (m *ReadResponse) GetResults() []*QueryResponse { - if m != nil { - return m.Results - } - return nil -} - -type QueryRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *QueryRequest) Reset() { *m = QueryRequest{} } -func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{4} -} -func (m *QueryRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRequest.Merge(m, src) -} -func (m *QueryRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRequest proto.InternalMessageInfo - -func (m *QueryRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *QueryRequest) GetEndTimestampMs() int64 { - if m != nil { - return 
m.EndTimestampMs - } - return 0 -} - -func (m *QueryRequest) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -type QueryResponse struct { - Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *QueryResponse) Reset() { *m = QueryResponse{} } -func (*QueryResponse) ProtoMessage() {} -func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{5} -} -func (m *QueryResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResponse.Merge(m, src) -} -func (m *QueryResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResponse proto.InternalMessageInfo - -func (m *QueryResponse) GetTimeseries() []TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -// QueryStreamResponse contains a batch of timeseries chunks. 
-type QueryStreamResponse struct { - Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } -func (*QueryStreamResponse) ProtoMessage() {} -func (*QueryStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{6} -} -func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryStreamResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryStreamResponse.Merge(m, src) -} -func (m *QueryStreamResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryStreamResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryStreamResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryStreamResponse proto.InternalMessageInfo - -func (m *QueryStreamResponse) GetTimeseries() []TimeSeriesChunk { - if m != nil { - return m.Timeseries - } - return nil -} - -type LabelValuesRequest struct { - LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` -} - -func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } -func (*LabelValuesRequest) ProtoMessage() {} -func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{7} -} -func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err 
!= nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesRequest.Merge(m, src) -} -func (m *LabelValuesRequest) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo - -func (m *LabelValuesRequest) GetLabelName() string { - if m != nil { - return m.LabelName - } - return "" -} - -type LabelValuesResponse struct { - LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` -} - -func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } -func (*LabelValuesResponse) ProtoMessage() {} -func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{8} -} -func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesResponse.Merge(m, src) -} -func (m *LabelValuesResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo - -func (m *LabelValuesResponse) GetLabelValues() []string { - if m != nil { - return m.LabelValues - } - return nil -} - -type LabelNamesRequest struct { -} - -func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } -func (*LabelNamesRequest) ProtoMessage() {} -func (*LabelNamesRequest) 
Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{9} -} -func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesRequest.Merge(m, src) -} -func (m *LabelNamesRequest) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo - -type LabelNamesResponse struct { - LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` -} - -func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } -func (*LabelNamesResponse) ProtoMessage() {} -func (*LabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{10} -} -func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesResponse.Merge(m, src) -} -func (m *LabelNamesResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo - 
-func (m *LabelNamesResponse) GetLabelNames() []string { - if m != nil { - return m.LabelNames - } - return nil -} - -type UserStatsRequest struct { -} - -func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } -func (*UserStatsRequest) ProtoMessage() {} -func (*UserStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{11} -} -func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserStatsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserStatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsRequest.Merge(m, src) -} -func (m *UserStatsRequest) XXX_Size() int { - return m.Size() -} -func (m *UserStatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UserStatsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UserStatsRequest proto.InternalMessageInfo - -type UserStatsResponse struct { - IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate,proto3" json:"ingestion_rate,omitempty"` - NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries,proto3" json:"num_series,omitempty"` - ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` - RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` -} - -func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } -func (*UserStatsResponse) ProtoMessage() {} -func (*UserStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{12} -} -func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsResponse.Merge(m, src) -} -func (m *UserStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UserStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UserStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UserStatsResponse proto.InternalMessageInfo - -func (m *UserStatsResponse) GetIngestionRate() float64 { - if m != nil { - return m.IngestionRate - } - return 0 -} - -func (m *UserStatsResponse) GetNumSeries() uint64 { - if m != nil { - return m.NumSeries - } - return 0 -} - -func (m *UserStatsResponse) GetApiIngestionRate() float64 { - if m != nil { - return m.ApiIngestionRate - } - return 0 -} - -func (m *UserStatsResponse) GetRuleIngestionRate() float64 { - if m != nil { - return m.RuleIngestionRate - } - return 0 -} - -type UserIDStatsResponse struct { - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } -func (*UserIDStatsResponse) ProtoMessage() {} -func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{13} -} -func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserIDStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - 
return nil, err - } - return b[:n], nil - } -} -func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserIDStatsResponse.Merge(m, src) -} -func (m *UserIDStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UserIDStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UserIDStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UserIDStatsResponse proto.InternalMessageInfo - -func (m *UserIDStatsResponse) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *UserIDStatsResponse) GetData() *UserStatsResponse { - if m != nil { - return m.Data - } - return nil -} - -type UsersStatsResponse struct { - Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } -func (*UsersStatsResponse) ProtoMessage() {} -func (*UsersStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{14} -} -func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsersStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UsersStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsersStatsResponse.Merge(m, src) -} -func (m *UsersStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UsersStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UsersStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UsersStatsResponse proto.InternalMessageInfo - -func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse { - if m != nil { - return m.Stats - } - return nil -} - -type MetricsForLabelMatchersRequest struct { - StartTimestampMs int64 
`protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` -} - -func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } -func (*MetricsForLabelMatchersRequest) ProtoMessage() {} -func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{15} -} -func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src) -} -func (m *MetricsForLabelMatchersRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsForLabelMatchersRequest proto.InternalMessageInfo - -func (m *MetricsForLabelMatchersRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *MetricsForLabelMatchersRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { - if m != nil { - return m.MatchersSet - } - return nil -} - -type MetricsForLabelMatchersResponse struct { - Metric 
[]*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` -} - -func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} } -func (*MetricsForLabelMatchersResponse) ProtoMessage() {} -func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{16} -} -func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src) -} -func (m *MetricsForLabelMatchersResponse) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsForLabelMatchersResponse proto.InternalMessageInfo - -func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -type TimeSeriesChunk struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Labels []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` - Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` -} - -func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } -func (*TimeSeriesChunk) ProtoMessage() {} -func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return 
fileDescriptor_893a47d0a749d749, []int{17} -} -func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesChunk.Merge(m, src) -} -func (m *TimeSeriesChunk) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesChunk) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo - -func (m *TimeSeriesChunk) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesChunk) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesChunk) GetChunks() []Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -type Chunk struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Encoding int32 `protobuf:"varint,3,opt,name=encoding,proto3" json:"encoding,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *Chunk) Reset() { *m = Chunk{} } -func (*Chunk) ProtoMessage() {} -func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{18} -} -func (m *Chunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) - } 
else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Chunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chunk.Merge(m, src) -} -func (m *Chunk) XXX_Size() int { - return m.Size() -} -func (m *Chunk) XXX_DiscardUnknown() { - xxx_messageInfo_Chunk.DiscardUnknown(m) -} - -var xxx_messageInfo_Chunk proto.InternalMessageInfo - -func (m *Chunk) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *Chunk) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *Chunk) GetEncoding() int32 { - if m != nil { - return m.Encoding - } - return 0 -} - -func (m *Chunk) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type TransferChunksResponse struct { -} - -func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } -func (*TransferChunksResponse) ProtoMessage() {} -func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{19} -} -func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransferChunksResponse.Merge(m, src) -} -func (m *TransferChunksResponse) XXX_Size() int { - return m.Size() -} -func (m *TransferChunksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo - -type TimeSeries struct { - Labels []LabelAdapter 
`protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` - // Sorted by time, oldest sample first. - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{20} -} -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return m.Size() -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetSamples() []Sample { - if m != nil { - return m.Samples - } - return nil -} - -type LabelPair struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{21} -} -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelPair) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return m.Size() -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *LabelPair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{22} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m *Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Sample) GetTimestampMs() int64 { - if m != nil { - return m.TimestampMs - } - return 0 -} - -type LabelMatchers struct { - Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } -func (*LabelMatchers) ProtoMessage() {} -func (*LabelMatchers) Descriptor() 
([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{23} -} -func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatchers) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatchers.Merge(m, src) -} -func (m *LabelMatchers) XXX_Size() int { - return m.Size() -} -func (m *LabelMatchers) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatchers.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatchers proto.InternalMessageInfo - -func (m *LabelMatchers) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -type Metric struct { - Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{24} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -type LabelMatcher struct { - Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" 
json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{25} -} -func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatcher) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatcher.Merge(m, src) -} -func (m *LabelMatcher) XXX_Size() int { - return m.Size() -} -func (m *LabelMatcher) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatcher.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo - -func (m *LabelMatcher) GetType() MatchType { - if m != nil { - return m.Type - } - return EQUAL -} - -func (m *LabelMatcher) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LabelMatcher) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type TimeSeriesFile struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } -func (*TimeSeriesFile) ProtoMessage() {} -func (*TimeSeriesFile) Descriptor() ([]byte, []int) { - 
return fileDescriptor_893a47d0a749d749, []int{26} -} -func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesFile) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesFile.Merge(m, src) -} -func (m *TimeSeriesFile) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesFile) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesFile.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesFile proto.InternalMessageInfo - -func (m *TimeSeriesFile) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesFile) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesFile) GetFilename() string { - if m != nil { - return m.Filename - } - return "" -} - -func (m *TimeSeriesFile) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type TransferTSDBResponse struct { -} - -func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } -func (*TransferTSDBResponse) ProtoMessage() {} -func (*TransferTSDBResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{27} -} -func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransferTSDBResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TransferTSDBResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TransferTSDBResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_TransferTSDBResponse.Merge(m, src) -} -func (m *TransferTSDBResponse) XXX_Size() int { - return m.Size() -} -func (m *TransferTSDBResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TransferTSDBResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TransferTSDBResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) - proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) - proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest") - proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse") - proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest") - proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse") - proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest") - proto.RegisterType((*QueryResponse)(nil), "cortex.QueryResponse") - proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse") - proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest") - proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse") - proto.RegisterType((*LabelNamesRequest)(nil), "cortex.LabelNamesRequest") - proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse") - proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest") - proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse") - proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse") - proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse") - proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest") - proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse") - proto.RegisterType((*TimeSeriesChunk)(nil), "cortex.TimeSeriesChunk") - proto.RegisterType((*Chunk)(nil), "cortex.Chunk") - proto.RegisterType((*TransferChunksResponse)(nil), 
"cortex.TransferChunksResponse") - proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries") - proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair") - proto.RegisterType((*Sample)(nil), "cortex.Sample") - proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") - proto.RegisterType((*Metric)(nil), "cortex.Metric") - proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") - proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile") - proto.RegisterType((*TransferTSDBResponse)(nil), "cortex.TransferTSDBResponse") -} - -func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } - -var fileDescriptor_893a47d0a749d749 = []byte{ - // 1266 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xdf, 0x8d, 0x5f, 0x12, 0x3f, 0xde, 0xb8, 0xce, 0x24, 0x6d, 0xd3, 0xed, 0xff, 0xbf, 0x29, - 0x23, 0xb5, 0x44, 0x40, 0xdd, 0x92, 0xaa, 0xd0, 0x03, 0x55, 0xe5, 0xb4, 0x69, 0x6b, 0x94, 0xa4, - 0xe9, 0xd8, 0x05, 0x84, 0x84, 0xac, 0x8d, 0x3d, 0x49, 0x56, 0xec, 0x8b, 0xbb, 0x33, 0x8b, 0xe8, - 0x01, 0x09, 0x89, 0x0f, 0x00, 0x47, 0x3e, 0x02, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8f, 0x15, - 0x87, 0x8a, 0x3a, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xee, 0x5a, 0x44, 0x40, - 0x6f, 0x3b, 0xcf, 0xf3, 0x9b, 0xdf, 0x3c, 0xaf, 0x33, 0xcf, 0x82, 0x31, 0x08, 0x42, 0x4e, 0xbf, - 0x6e, 0x8d, 0xc2, 0x80, 0x07, 0xa8, 0x2a, 0x57, 0xe6, 0xe5, 0x43, 0x87, 0x1f, 0x45, 0xfb, 0xad, - 0x41, 0xe0, 0x5d, 0x39, 0x0c, 0x0e, 0x83, 0x2b, 0x42, 0xbd, 0x1f, 0x1d, 0x88, 0x95, 0x58, 0x88, - 0x2f, 0xb9, 0x0d, 0xff, 0xaa, 0x83, 0xf1, 0x69, 0xe8, 0x70, 0x4a, 0xe8, 0xe3, 0x88, 0x32, 0x8e, - 0x76, 0x01, 0xb8, 0xe3, 0x51, 0x46, 0x43, 0x87, 0xb2, 0x55, 0xfd, 0x42, 0x69, 0xbd, 0xbe, 0x81, - 0x5a, 0xea, 0xa8, 0x9e, 0xe3, 0xd1, 0xae, 0xd0, 0x6c, 0x9a, 0x4f, 0x5f, 0xac, 0x69, 0xbf, 0xbf, - 0x58, 0x43, 0x7b, 0x21, 0xb5, 0x5d, 0x37, 0x18, 0xf4, 0xd2, 0x5d, 0x24, 0xc3, 0x80, 
0x3e, 0x84, - 0x6a, 0x37, 0x88, 0xc2, 0x01, 0x5d, 0x9d, 0xbb, 0xa0, 0xaf, 0x37, 0x36, 0xd6, 0x12, 0xae, 0xec, - 0xa9, 0x2d, 0x09, 0xd9, 0xf2, 0x23, 0x8f, 0x54, 0x99, 0xf8, 0xc6, 0x6b, 0x00, 0x13, 0x29, 0x9a, - 0x87, 0x52, 0x7b, 0xaf, 0xd3, 0xd4, 0xd0, 0x02, 0x94, 0xc9, 0xa3, 0xed, 0xad, 0xa6, 0x8e, 0x4f, - 0xc1, 0xa2, 0xe2, 0x60, 0xa3, 0xc0, 0x67, 0x14, 0xdf, 0x84, 0x3a, 0xa1, 0xf6, 0x30, 0xf1, 0xa4, - 0x05, 0xf3, 0x8f, 0xa3, 0xac, 0x1b, 0x2b, 0xc9, 0xd1, 0x0f, 0x23, 0x1a, 0x3e, 0x51, 0x30, 0x92, - 0x80, 0xf0, 0x2d, 0x30, 0xe4, 0x76, 0x49, 0x87, 0xae, 0xc0, 0x7c, 0x48, 0x59, 0xe4, 0xf2, 0x64, - 0xff, 0xe9, 0xa9, 0xfd, 0x12, 0x47, 0x12, 0x14, 0xfe, 0x51, 0x07, 0x23, 0x4b, 0x8d, 0xde, 0x03, - 0xc4, 0xb8, 0x1d, 0xf2, 0xbe, 0x88, 0x07, 0xb7, 0xbd, 0x51, 0xdf, 0x8b, 0xc9, 0xf4, 0xf5, 0x12, - 0x69, 0x0a, 0x4d, 0x2f, 0x51, 0xec, 0x30, 0xb4, 0x0e, 0x4d, 0xea, 0x0f, 0xf3, 0xd8, 0x39, 0x81, - 0x6d, 0x50, 0x7f, 0x98, 0x45, 0x5e, 0x85, 0x05, 0xcf, 0xe6, 0x83, 0x23, 0x1a, 0xb2, 0xd5, 0x52, - 0xde, 0xb5, 0x6d, 0x7b, 0x9f, 0xba, 0x3b, 0x52, 0x49, 0x52, 0x14, 0xee, 0xc0, 0x62, 0xce, 0x68, - 0x74, 0xe3, 0x84, 0x69, 0x2e, 0xc7, 0x69, 0xce, 0x26, 0x14, 0xf7, 0x60, 0x59, 0x50, 0x75, 0x79, - 0x48, 0x6d, 0x2f, 0x25, 0xbc, 0x59, 0x40, 0x78, 0xf6, 0x75, 0xc2, 0xdb, 0x47, 0x91, 0xff, 0x65, - 0x01, 0xeb, 0x35, 0x40, 0xc2, 0xf4, 0x4f, 0x6c, 0x37, 0xa2, 0x2c, 0x09, 0xe0, 0xff, 0x01, 0xdc, - 0x58, 0xda, 0xf7, 0x6d, 0x8f, 0x8a, 0xc0, 0xd5, 0x48, 0x4d, 0x48, 0x76, 0x6d, 0x8f, 0xe2, 0x1b, - 0xb0, 0x9c, 0xdb, 0xa4, 0x4c, 0x79, 0x0b, 0x0c, 0xb9, 0xeb, 0x2b, 0x21, 0x17, 0xc6, 0xd4, 0x48, - 0xdd, 0x9d, 0x40, 0xf1, 0x32, 0x2c, 0x6d, 0x27, 0x34, 0xc9, 0x69, 0xf8, 0xba, 0xb2, 0x41, 0x09, - 0x15, 0xdb, 0x1a, 0xd4, 0x27, 0x36, 0x24, 0x64, 0x90, 0x1a, 0xc1, 0x30, 0x82, 0xe6, 0x23, 0x46, - 0xc3, 0x2e, 0xb7, 0x79, 0x4a, 0xf5, 0x8b, 0x0e, 0x4b, 0x19, 0xa1, 0xa2, 0xba, 0x08, 0x0d, 0xc7, - 0x3f, 0xa4, 0x8c, 0x3b, 0x81, 0xdf, 0x0f, 0x6d, 0x2e, 0x5d, 0xd2, 0xc9, 0x62, 0x2a, 0x25, 0x36, - 0xa7, 0xb1, 0xd7, 0x7e, 
0xe4, 0xf5, 0x55, 0x28, 0xe3, 0x12, 0x28, 0x93, 0x9a, 0x1f, 0x79, 0x32, - 0x82, 0x71, 0x55, 0xd9, 0x23, 0xa7, 0x3f, 0xc5, 0x54, 0x12, 0x4c, 0x4d, 0x7b, 0xe4, 0x74, 0x72, - 0x64, 0x2d, 0x58, 0x0e, 0x23, 0x97, 0x4e, 0xc3, 0xcb, 0x02, 0xbe, 0x14, 0xab, 0x72, 0x78, 0xfc, - 0x05, 0x2c, 0xc7, 0x86, 0x77, 0xee, 0xe4, 0x4d, 0x3f, 0x0b, 0xf3, 0x11, 0xa3, 0x61, 0xdf, 0x19, - 0xaa, 0x34, 0x54, 0xe3, 0x65, 0x67, 0x88, 0x2e, 0x43, 0x79, 0x68, 0x73, 0x5b, 0x98, 0x59, 0xdf, - 0x38, 0x97, 0x64, 0xfc, 0x35, 0xe7, 0x89, 0x80, 0xe1, 0x7b, 0x80, 0x62, 0x15, 0xcb, 0xb3, 0xbf, - 0x0f, 0x15, 0x16, 0x0b, 0x54, 0xdd, 0x9c, 0xcf, 0xb2, 0x4c, 0x59, 0x42, 0x24, 0x12, 0xff, 0xac, - 0x83, 0xb5, 0x43, 0x79, 0xe8, 0x0c, 0xd8, 0xdd, 0x20, 0xcc, 0x96, 0x3d, 0x7b, 0xd3, 0xed, 0x77, - 0x03, 0x8c, 0xa4, 0xb1, 0xfa, 0x8c, 0x72, 0xd5, 0x82, 0xa7, 0x8b, 0x5a, 0x90, 0x91, 0x7a, 0x02, - 0xed, 0x52, 0x8e, 0x3b, 0xb0, 0x36, 0xd3, 0x66, 0x15, 0x8a, 0x4b, 0x50, 0xf5, 0x04, 0x44, 0xc5, - 0xa2, 0x91, 0xd0, 0xca, 0x8d, 0x44, 0x69, 0xf1, 0x6f, 0x3a, 0x9c, 0x9a, 0x6a, 0xab, 0xd8, 0x85, - 0x83, 0x30, 0xf0, 0x54, 0xae, 0xb3, 0xd9, 0x6a, 0xc4, 0xf2, 0x8e, 0x12, 0x77, 0x86, 0xd9, 0x74, - 0xce, 0xe5, 0xd2, 0x79, 0x0b, 0xaa, 0xa2, 0xb4, 0x93, 0x8b, 0x65, 0x29, 0xe7, 0xd5, 0x9e, 0xed, - 0x84, 0x9b, 0x2b, 0xea, 0xe6, 0x37, 0x84, 0xa8, 0x3d, 0xb4, 0x47, 0x9c, 0x86, 0x44, 0x6d, 0x43, - 0xef, 0x42, 0x75, 0x10, 0x1b, 0xc3, 0x56, 0xcb, 0x82, 0x60, 0x31, 0x21, 0xc8, 0x76, 0xbe, 0x82, - 0xe0, 0xef, 0x75, 0xa8, 0x48, 0xd3, 0xdf, 0x54, 0xae, 0x4c, 0x58, 0xa0, 0xfe, 0x20, 0x18, 0x3a, - 0xfe, 0xa1, 0x68, 0x91, 0x0a, 0x49, 0xd7, 0x08, 0xa9, 0xd2, 0x8d, 0x7b, 0xc1, 0x50, 0xf5, 0xb9, - 0x0a, 0x67, 0x7a, 0xa1, 0xed, 0xb3, 0x03, 0x1a, 0x0a, 0xc3, 0xd2, 0xc4, 0xe0, 0x6f, 0x00, 0x26, - 0xf1, 0xce, 0xc4, 0x49, 0xff, 0x67, 0x71, 0x6a, 0xc1, 0x3c, 0xb3, 0xbd, 0x91, 0x2b, 0x3a, 0x3c, - 0x97, 0xe8, 0xae, 0x10, 0xab, 0x48, 0x25, 0x20, 0x7c, 0x1d, 0x6a, 0x29, 0x75, 0x6c, 0x79, 0x7a, - 0x23, 0x1a, 0x44, 0x7c, 0xa3, 0x15, 0xa8, 0x88, 0xfb, 0x4e, 
0x04, 0xc2, 0x20, 0x72, 0x81, 0xdb, - 0x50, 0x95, 0x7c, 0x13, 0xbd, 0xbc, 0x73, 0xe4, 0x22, 0xbe, 0x2b, 0x0b, 0xa2, 0x58, 0xe7, 0x93, - 0x10, 0xe2, 0x36, 0x2c, 0xe6, 0x4a, 0x35, 0xf7, 0xfc, 0xe8, 0x27, 0x7c, 0x7e, 0xaa, 0xb2, 0x7c, - 0xff, 0x75, 0xdc, 0x70, 0x1f, 0x8c, 0xec, 0x21, 0xe8, 0x22, 0x94, 0xf9, 0x93, 0x91, 0xf4, 0xaa, - 0x31, 0xa1, 0x13, 0xea, 0xde, 0x93, 0x11, 0x25, 0x42, 0x9d, 0x46, 0x4c, 0x56, 0xfb, 0x54, 0xc4, - 0x4a, 0x42, 0xa8, 0x22, 0xf6, 0x9d, 0x0e, 0x8d, 0x49, 0xa2, 0xef, 0x3a, 0x2e, 0xfd, 0x2f, 0xfa, - 0xca, 0x84, 0x85, 0x03, 0xc7, 0xa5, 0xc2, 0x06, 0x79, 0x5c, 0xba, 0x2e, 0xac, 0xc3, 0x33, 0xb0, - 0x92, 0xd4, 0x61, 0xaf, 0x7b, 0x67, 0x33, 0xa9, 0xc2, 0x77, 0x3e, 0x86, 0x5a, 0xea, 0x1a, 0xaa, - 0x41, 0x65, 0xeb, 0xe1, 0xa3, 0xf6, 0x76, 0x53, 0x43, 0x8b, 0x50, 0xdb, 0x7d, 0xd0, 0xeb, 0xcb, - 0xa5, 0x8e, 0x4e, 0x41, 0x9d, 0x6c, 0xdd, 0xdb, 0xfa, 0xac, 0xbf, 0xd3, 0xee, 0xdd, 0xbe, 0xdf, - 0x9c, 0x43, 0x08, 0x1a, 0x52, 0xb0, 0xfb, 0x40, 0xc9, 0x4a, 0x1b, 0xc7, 0x15, 0x58, 0x48, 0x6c, - 0x47, 0xd7, 0xa1, 0xbc, 0x17, 0xb1, 0x23, 0xb4, 0x52, 0x34, 0x9f, 0x99, 0xa7, 0xa7, 0xa4, 0xaa, - 0x27, 0x34, 0xf4, 0x01, 0x54, 0xc4, 0x34, 0x80, 0x0a, 0x87, 0x2b, 0xb3, 0x78, 0x64, 0xc2, 0x1a, - 0xba, 0x03, 0xf5, 0xcc, 0x14, 0x31, 0x63, 0xf7, 0xf9, 0x9c, 0x34, 0x3f, 0x70, 0x60, 0xed, 0xaa, - 0x8e, 0xee, 0x43, 0x3d, 0x33, 0x00, 0x20, 0x33, 0x57, 0x4c, 0xb9, 0x51, 0x62, 0xc2, 0x55, 0x30, - 0x31, 0x60, 0x0d, 0x6d, 0x01, 0x4c, 0xde, 0x7e, 0x74, 0x2e, 0x07, 0xce, 0x0e, 0x09, 0xa6, 0x59, - 0xa4, 0x4a, 0x69, 0x36, 0xa1, 0x96, 0xbe, 0x7c, 0x68, 0xb5, 0xe0, 0x31, 0x94, 0x24, 0xb3, 0x9f, - 0x49, 0xac, 0xa1, 0xbb, 0x60, 0xb4, 0x5d, 0xf7, 0x24, 0x34, 0x66, 0x56, 0xc3, 0xa6, 0x79, 0x5c, - 0x38, 0x3b, 0xe3, 0xb1, 0x41, 0x97, 0xf2, 0x8f, 0xca, 0xac, 0x17, 0xd4, 0x7c, 0xfb, 0x6f, 0x71, - 0xe9, 0x69, 0x3b, 0xd0, 0xc8, 0x5f, 0x9c, 0x68, 0xd6, 0xf4, 0x67, 0x5a, 0xa9, 0xa2, 0xf8, 0xa6, - 0xd5, 0xd6, 0xe3, 0xcc, 0x1a, 0xd9, 0xfa, 0x47, 0x67, 0x5e, 0x27, 0x8b, 0x5b, 0xd3, 0xfc, 0xdf, - 
0x34, 0x57, 0xb6, 0x5b, 0x62, 0xa6, 0xcd, 0x8f, 0x9e, 0xbd, 0xb4, 0xb4, 0xe7, 0x2f, 0x2d, 0xed, - 0xd5, 0x4b, 0x4b, 0xff, 0x76, 0x6c, 0xe9, 0x3f, 0x8d, 0x2d, 0xfd, 0xe9, 0xd8, 0xd2, 0x9f, 0x8d, - 0x2d, 0xfd, 0x8f, 0xb1, 0xa5, 0xff, 0x39, 0xb6, 0xb4, 0x57, 0x63, 0x4b, 0xff, 0xe1, 0xd8, 0xd2, - 0x9e, 0x1d, 0x5b, 0xda, 0xf3, 0x63, 0x4b, 0xfb, 0xbc, 0x3a, 0x70, 0x1d, 0xea, 0xf3, 0xfd, 0xaa, - 0xf8, 0x4d, 0xba, 0xf6, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x33, 0x55, 0x6d, 0x0d, - 0x00, 0x00, -} - -func (x MatchType) String() string { - s, ok := MatchType_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (x WriteRequest_SourceEnum) String() string { - s, ok := WriteRequest_SourceEnum_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *WriteRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteRequest) - if !ok { - that2, ok := that.(WriteRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(that1.Timeseries[i]) { - return false - } - } - if this.Source != that1.Source { - return false - } - return true -} -func (this *WriteResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteResponse) - if !ok { - that2, ok := that.(WriteResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *ReadRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadRequest) - if !ok { - that2, ok := that.(ReadRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == 
nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Queries) != len(that1.Queries) { - return false - } - for i := range this.Queries { - if !this.Queries[i].Equal(that1.Queries[i]) { - return false - } - } - return true -} -func (this *ReadResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadResponse) - if !ok { - that2, ok := that.(ReadResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Results) != len(that1.Results) { - return false - } - for i := range this.Results { - if !this.Results[i].Equal(that1.Results[i]) { - return false - } - } - return true -} -func (this *QueryRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryRequest) - if !ok { - that2, ok := that.(QueryRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *QueryResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryResponse) - if !ok { - that2, ok := that.(QueryResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this 
*QueryStreamResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryStreamResponse) - if !ok { - that2, ok := that.(QueryStreamResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this *LabelValuesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesRequest) - if !ok { - that2, ok := that.(LabelValuesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.LabelName != that1.LabelName { - return false - } - return true -} -func (this *LabelValuesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesResponse) - if !ok { - that2, ok := that.(LabelValuesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelValues) != len(that1.LabelValues) { - return false - } - for i := range this.LabelValues { - if this.LabelValues[i] != that1.LabelValues[i] { - return false - } - } - return true -} -func (this *LabelNamesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelNamesRequest) - if !ok { - that2, ok := that.(LabelNamesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *LabelNamesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } 
- - that1, ok := that.(*LabelNamesResponse) - if !ok { - that2, ok := that.(LabelNamesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelNames) != len(that1.LabelNames) { - return false - } - for i := range this.LabelNames { - if this.LabelNames[i] != that1.LabelNames[i] { - return false - } - } - return true -} -func (this *UserStatsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserStatsRequest) - if !ok { - that2, ok := that.(UserStatsRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *UserStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserStatsResponse) - if !ok { - that2, ok := that.(UserStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.IngestionRate != that1.IngestionRate { - return false - } - if this.NumSeries != that1.NumSeries { - return false - } - if this.ApiIngestionRate != that1.ApiIngestionRate { - return false - } - if this.RuleIngestionRate != that1.RuleIngestionRate { - return false - } - return true -} -func (this *UserIDStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserIDStatsResponse) - if !ok { - that2, ok := that.(UserIDStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.UserId != that1.UserId { - return false - } - if !this.Data.Equal(that1.Data) { - return false - } - return true -} -func (this *UsersStatsResponse) Equal(that interface{}) bool { - if that 
== nil { - return this == nil - } - - that1, ok := that.(*UsersStatsResponse) - if !ok { - that2, ok := that.(UsersStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Stats) != len(that1.Stats) { - return false - } - for i := range this.Stats { - if !this.Stats[i].Equal(that1.Stats[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsForLabelMatchersRequest) - if !ok { - that2, ok := that.(MetricsForLabelMatchersRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.MatchersSet) != len(that1.MatchersSet) { - return false - } - for i := range this.MatchersSet { - if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsForLabelMatchersResponse) - if !ok { - that2, ok := that.(MetricsForLabelMatchersResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Metric) != len(that1.Metric) { - return false - } - for i := range this.Metric { - if !this.Metric[i].Equal(that1.Metric[i]) { - return false - } - } - return true -} -func (this *TimeSeriesChunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesChunk) - if !ok { - that2, ok := that.(TimeSeriesChunk) - if ok { - that1 = &that2 - } else { - 
return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { - return false - } - } - return true -} -func (this *Chunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Chunk) - if !ok { - that2, ok := that.(Chunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if this.Encoding != that1.Encoding { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *TransferChunksResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TransferChunksResponse) - if !ok { - that2, ok := that.(TransferChunksResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *TimeSeries) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeries) - if !ok { - that2, ok := that.(TimeSeries) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if 
!this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Samples) != len(that1.Samples) { - return false - } - for i := range this.Samples { - if !this.Samples[i].Equal(&that1.Samples[i]) { - return false - } - } - return true -} -func (this *LabelPair) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelPair) - if !ok { - that2, ok := that.(LabelPair) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Name, that1.Name) { - return false - } - if !bytes.Equal(this.Value, that1.Value) { - return false - } - return true -} -func (this *Sample) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Sample) - if !ok { - that2, ok := that.(Sample) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Value != that1.Value { - return false - } - if this.TimestampMs != that1.TimestampMs { - return false - } - return true -} -func (this *LabelMatchers) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelMatchers) - if !ok { - that2, ok := that.(LabelMatchers) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *Metric) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Metric) - if !ok { - that2, ok := that.(Metric) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return 
false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - return true -} -func (this *LabelMatcher) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelMatcher) - if !ok { - that2, ok := that.(LabelMatcher) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if this.Name != that1.Name { - return false - } - if this.Value != that1.Value { - return false - } - return true -} -func (this *TimeSeriesFile) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesFile) - if !ok { - that2, ok := that.(TimeSeriesFile) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if this.Filename != that1.Filename { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *TransferTSDBResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TransferTSDBResponse) - if !ok { - that2, ok := that.(TransferTSDBResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *WriteRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.WriteRequest{") - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") - s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") - s = append(s, "}") - return 
strings.Join(s, "") -} -func (this *WriteResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.WriteResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ReadRequest{") - if this.Queries != nil { - s = append(s, "Queries: "+fmt.Sprintf("%#v", this.Queries)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ReadResponse{") - if this.Results != nil { - s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.QueryRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.QueryResponse{") - if this.Timeseries != nil { - vs := make([]*TimeSeries, len(this.Timeseries)) - for i := range vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryStreamResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.QueryStreamResponse{") - if this.Timeseries != nil { - vs := make([]*TimeSeriesChunk, len(this.Timeseries)) - for i := range 
vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesRequest{") - s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesResponse{") - s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.LabelNamesRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelNamesResponse{") - s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.UserStatsRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.UserStatsResponse{") - s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") - s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") - s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") - s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") - s = append(s, "}") - return 
strings.Join(s, "") -} -func (this *UserIDStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.UserIDStatsResponse{") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - if this.Data != nil { - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UsersStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.UsersStatsResponse{") - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.MetricsForLabelMatchersRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.MatchersSet != nil { - s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsForLabelMatchersResponse{") - if this.Metric != nil { - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeriesChunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.TimeSeriesChunk{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Chunks != nil { - vs := make([]*Chunk, 
len(this.Chunks)) - for i := range vs { - vs[i] = &this.Chunks[i] - } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Chunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.Chunk{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TransferChunksResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.TransferChunksResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeries) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.TimeSeries{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Samples != nil { - vs := make([]*Sample, len(this.Samples)) - for i := range vs { - vs[i] = &this.Samples[i] - } - s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelPair) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.LabelPair{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Sample) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.Sample{") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") - s = append(s, "}") - return 
strings.Join(s, "") -} -func (this *LabelMatchers) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelMatchers{") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Metric) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.Metric{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatcher) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.LabelMatcher{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeriesFile) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.TimeSeriesFile{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TransferTSDBResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.TransferTSDBResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringCortex(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if 
they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IngesterClient is the client API for Ingester service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IngesterClient interface { - Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) - Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) - QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) - LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) - LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) - UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) - AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) - MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
- TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) - // TransferTSDB transfers all files of a tsdb to a joining ingester - TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) -} - -type ingesterClient struct { - cc *grpc.ClientConn -} - -func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { - return &ingesterClient{cc} -} - -func (c *ingesterClient) Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { - out := new(WriteResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { - out := new(QueryResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Query", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) 
- if err != nil { - return nil, err - } - x := &ingesterQueryStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_QueryStreamClient interface { - Recv() (*QueryStreamResponse, error) - grpc.ClientStream -} - -type ingesterQueryStreamClient struct { - grpc.ClientStream -} - -func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { - m := new(QueryStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { - out := new(LabelValuesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { - out := new(LabelNamesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { - out := new(UserStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { - out := new(UsersStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { - out := new(MetricsForLabelMatchersResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/TransferChunks", opts...) - if err != nil { - return nil, err - } - x := &ingesterTransferChunksClient{stream} - return x, nil -} - -type Ingester_TransferChunksClient interface { - Send(*TimeSeriesChunk) error - CloseAndRecv() (*TransferChunksResponse, error) - grpc.ClientStream -} - -type ingesterTransferChunksClient struct { - grpc.ClientStream -} - -func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error { - return x.ClientStream.SendMsg(m) -} - -func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(TransferChunksResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/TransferTSDB", opts...) 
- if err != nil { - return nil, err - } - x := &ingesterTransferTSDBClient{stream} - return x, nil -} - -type Ingester_TransferTSDBClient interface { - Send(*TimeSeriesFile) error - CloseAndRecv() (*TransferTSDBResponse, error) - grpc.ClientStream -} - -type ingesterTransferTSDBClient struct { - grpc.ClientStream -} - -func (x *ingesterTransferTSDBClient) Send(m *TimeSeriesFile) error { - return x.ClientStream.SendMsg(m) -} - -func (x *ingesterTransferTSDBClient) CloseAndRecv() (*TransferTSDBResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(TransferTSDBResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// IngesterServer is the server API for Ingester service. -type IngesterServer interface { - Push(context.Context, *WriteRequest) (*WriteResponse, error) - Query(context.Context, *QueryRequest) (*QueryResponse, error) - QueryStream(*QueryRequest, Ingester_QueryStreamServer) error - LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) - LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) - UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) - AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) - MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). - TransferChunks(Ingester_TransferChunksServer) error - // TransferTSDB transfers all files of a tsdb to a joining ingester - TransferTSDB(Ingester_TransferTSDBServer) error -} - -// UnimplementedIngesterServer can be embedded to have forward compatible implementations. 
-type UnimplementedIngesterServer struct { -} - -func (*UnimplementedIngesterServer) Push(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") -} -func (*UnimplementedIngesterServer) Query(ctx context.Context, req *QueryRequest) (*QueryResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") -} -func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { - return status.Errorf(codes.Unimplemented, "method QueryStream not implemented") -} -func (*UnimplementedIngesterServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") -} -func (*UnimplementedIngesterServer) LabelNames(ctx context.Context, req *LabelNamesRequest) (*LabelNamesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") -} -func (*UnimplementedIngesterServer) UserStats(ctx context.Context, req *UserStatsRequest) (*UserStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserStats not implemented") -} -func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserStatsRequest) (*UsersStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AllUserStats not implemented") -} -func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") -} -func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { - return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") -} -func (*UnimplementedIngesterServer) TransferTSDB(srv 
Ingester_TransferTSDBServer) error { - return status.Errorf(codes.Unimplemented, "method TransferTSDB not implemented") -} - -func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { - s.RegisterService(&_Ingester_serviceDesc, srv) -} - -func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).Push(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Push", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Push(ctx, req.(*WriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).Query(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Query", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Query(ctx, req.(*QueryRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(QueryRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) -} - -type Ingester_QueryStreamServer interface { - Send(*QueryStreamResponse) error - grpc.ServerStream -} - -type ingesterQueryStreamServer struct { - grpc.ServerStream -} - -func (x *ingesterQueryStreamServer) Send(m *QueryStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - 
-func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelValuesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelValues(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelValues", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelNames(ctx, req.(*LabelNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).UserStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/UserStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).AllUserStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/AllUserStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsForLabelMatchersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) -} - -type Ingester_TransferChunksServer interface { - SendAndClose(*TransferChunksResponse) error - Recv() (*TimeSeriesChunk, error) - grpc.ServerStream -} - -type ingesterTransferChunksServer struct { - grpc.ServerStream -} - -func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { - m := new(TimeSeriesChunk) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil 
-} - -func _Ingester_TransferTSDB_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IngesterServer).TransferTSDB(&ingesterTransferTSDBServer{stream}) -} - -type Ingester_TransferTSDBServer interface { - SendAndClose(*TransferTSDBResponse) error - Recv() (*TimeSeriesFile, error) - grpc.ServerStream -} - -type ingesterTransferTSDBServer struct { - grpc.ServerStream -} - -func (x *ingesterTransferTSDBServer) SendAndClose(m *TransferTSDBResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *ingesterTransferTSDBServer) Recv() (*TimeSeriesFile, error) { - m := new(TimeSeriesFile) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Ingester_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cortex.Ingester", - HandlerType: (*IngesterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Push", - Handler: _Ingester_Push_Handler, - }, - { - MethodName: "Query", - Handler: _Ingester_Query_Handler, - }, - { - MethodName: "LabelValues", - Handler: _Ingester_LabelValues_Handler, - }, - { - MethodName: "LabelNames", - Handler: _Ingester_LabelNames_Handler, - }, - { - MethodName: "UserStats", - Handler: _Ingester_UserStats_Handler, - }, - { - MethodName: "AllUserStats", - Handler: _Ingester_AllUserStats_Handler, - }, - { - MethodName: "MetricsForLabelMatchers", - Handler: _Ingester_MetricsForLabelMatchers_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "QueryStream", - Handler: _Ingester_QueryStream_Handler, - ServerStreams: true, - }, - { - StreamName: "TransferChunks", - Handler: _Ingester_TransferChunks_Handler, - ClientStreams: true, - }, - { - StreamName: "TransferTSDB", - Handler: _Ingester_TransferTSDB_Handler, - ClientStreams: true, - }, - }, - Metadata: "cortex.proto", -} - -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - 
} - return dAtA[:n], nil -} - -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Source != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.Source)) - i-- - dAtA[i] = 0x10 - } - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Timeseries[iNdEx].Size() - i -= size - if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WriteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ReadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Queries) > 0 { - for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return 
len(dAtA) - i, nil -} - -func (m *ReadResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - 
if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelName) > 0 { - i -= 
len(m.LabelName) - copy(dAtA[i:], m.LabelName) - i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelValues) > 0 { - for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelValues[iNdEx]) - copy(dAtA[i:], m.LabelValues[iNdEx]) - i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelNames) > 0 { - 
for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelNames[iNdEx]) - copy(dAtA[i:], m.LabelNames[iNdEx]) - i = encodeVarintCortex(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RuleIngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) - i-- - dAtA[i] = 0x21 - } - if m.ApiIngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) - i-- - dAtA[i] = 0x19 - } - if m.NumSeries != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.NumSeries)) - i-- - dAtA[i] = 0x10 - } - if m.IngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserIDStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Data != nil { - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsersStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Stats) > 0 { - for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) 
-} - -func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MatchersSet) > 0 { - for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchersSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metric) > 0 { - for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeriesChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) 
- _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0x12 - } - if len(m.FromIngesterId) > 0 { - i -= len(m.FromIngesterId) - copy(dAtA[i:], m.FromIngesterId) - i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Chunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if m.Encoding != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.Encoding)) - i-- - dAtA[i] = 0x18 - } - if m.EndTimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TransferChunksResponse) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransferChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelPair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TimestampMs != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCortex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintCortex(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeriesFile) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if len(m.Filename) > 0 { - i -= len(m.Filename) - copy(dAtA[i:], m.Filename) - i = encodeVarintCortex(dAtA, i, uint64(len(m.Filename))) - i-- - dAtA[i] = 0x1a - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0x12 - } - if len(m.FromIngesterId) > 0 { - i -= len(m.FromIngesterId) - copy(dAtA[i:], m.FromIngesterId) - i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransferTSDBResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransferTSDBResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { - offset -= sovCortex(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *WriteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - if m.Source != 0 { - n += 1 + sovCortex(uint64(m.Source)) - } - return n -} - -func (m *WriteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ReadRequest) Size() (n int) { - if m 
== nil { - return 0 - } - var l int - _ = l - if len(m.Queries) > 0 { - for _, e := range m.Queries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *ReadResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, e := range m.Results { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *QueryRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.EndTimestampMs)) - } - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *QueryResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *QueryStreamResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelValuesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.LabelName) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *LabelValuesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { - l = len(s) - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelNamesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *LabelNamesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - l = len(s) - n += 1 + l + 
sovCortex(uint64(l)) - } - } - return n -} - -func (m *UserStatsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *UserStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IngestionRate != 0 { - n += 9 - } - if m.NumSeries != 0 { - n += 1 + sovCortex(uint64(m.NumSeries)) - } - if m.ApiIngestionRate != 0 { - n += 9 - } - if m.RuleIngestionRate != 0 { - n += 9 - } - return n -} - -func (m *UserIDStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - if m.Data != nil { - l = m.Data.Size() - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *UsersStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.EndTimestampMs)) - } - if len(m.MatchersSet) > 0 { - for _, e := range m.MatchersSet { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *TimeSeriesChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - if 
len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *Chunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovCortex(uint64(m.EndTimestampMs)) - } - if m.Encoding != 0 { - n += 1 + sovCortex(uint64(m.Encoding)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *TransferChunksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *TimeSeries) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelPair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *Sample) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != 0 { - n += 9 - } - if m.TimestampMs != 0 { - n += 1 + sovCortex(uint64(m.TimestampMs)) - } - return n -} - -func (m *LabelMatchers) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *Metric) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) - } - } - return n -} - -func (m *LabelMatcher) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { 
- n += 1 + sovCortex(uint64(m.Type)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *TimeSeriesFile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Filename) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -func (m *TransferTSDBResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovCortex(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCortex(x uint64) (n int) { - return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *WriteRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteRequest{`, - `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, - `Source:` + fmt.Sprintf("%v", this.Source) + `,`, - `}`, - }, "") - return s -} -func (this *WriteResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteResponse{`, - `}`, - }, "") - return s -} -func (this *ReadRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForQueries := "[]*QueryRequest{" - for _, f := range this.Queries { - repeatedStringForQueries += strings.Replace(f.String(), "QueryRequest", "QueryRequest", 1) + "," - } - repeatedStringForQueries += "}" - s := strings.Join([]string{`&ReadRequest{`, - `Queries:` + repeatedStringForQueries + `,`, - `}`, - }, "") - return s -} -func (this *ReadResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForResults := "[]*QueryResponse{" - for _, f := range this.Results { - repeatedStringForResults += strings.Replace(f.String(), 
"QueryResponse", "QueryResponse", 1) + "," - } - repeatedStringForResults += "}" - s := strings.Join([]string{`&ReadResponse{`, - `Results:` + repeatedStringForResults + `,`, - `}`, - }, "") - return s -} -func (this *QueryRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," - } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&QueryRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s -} -func (this *QueryResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForTimeseries := "[]TimeSeries{" - for _, f := range this.Timeseries { - repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + "," - } - repeatedStringForTimeseries += "}" - s := strings.Join([]string{`&QueryResponse{`, - `Timeseries:` + repeatedStringForTimeseries + `,`, - `}`, - }, "") - return s -} -func (this *QueryStreamResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForTimeseries := "[]TimeSeriesChunk{" - for _, f := range this.Timeseries { - repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + "," - } - repeatedStringForTimeseries += "}" - s := strings.Join([]string{`&QueryStreamResponse{`, - `Timeseries:` + repeatedStringForTimeseries + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesRequest{`, - `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, - `}`, - }, "") - return s -} -func (this 
*LabelValuesResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesResponse{`, - `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesRequest{`, - `}`, - }, "") - return s -} -func (this *LabelNamesResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesResponse{`, - `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, - `}`, - }, "") - return s -} -func (this *UserStatsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserStatsRequest{`, - `}`, - }, "") - return s -} -func (this *UserStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserStatsResponse{`, - `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, - `NumSeries:` + fmt.Sprintf("%v", this.NumSeries) + `,`, - `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, - `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, - `}`, - }, "") - return s -} -func (this *UserIDStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserIDStatsResponse{`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`, - `}`, - }, "") - return s -} -func (this *UsersStatsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForStats := "[]*UserIDStatsResponse{" - for _, f := range this.Stats { - repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," - } - repeatedStringForStats += "}" - s := strings.Join([]string{`&UsersStatsResponse{`, - `Stats:` + repeatedStringForStats + `,`, - `}`, - }, "") - 
return s -} -func (this *MetricsForLabelMatchersRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchersSet := "[]*LabelMatchers{" - for _, f := range this.MatchersSet { - repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," - } - repeatedStringForMatchersSet += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `MatchersSet:` + repeatedStringForMatchersSet + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMetric := "[]*Metric{" - for _, f := range this.Metric { - repeatedStringForMetric += strings.Replace(f.String(), "Metric", "Metric", 1) + "," - } - repeatedStringForMetric += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, - `Metric:` + repeatedStringForMetric + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeriesChunk) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunks := "[]Chunk{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&TimeSeriesChunk{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s -} -func (this *Chunk) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Chunk{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Encoding:` + fmt.Sprintf("%v", this.Encoding) + 
`,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *TransferChunksResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransferChunksResponse{`, - `}`, - }, "") - return s -} -func (this *TimeSeries) String() string { - if this == nil { - return "nil" - } - repeatedStringForSamples := "[]Sample{" - for _, f := range this.Samples { - repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," - } - repeatedStringForSamples += "}" - s := strings.Join([]string{`&TimeSeries{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Samples:` + repeatedStringForSamples + `,`, - `}`, - }, "") - return s -} -func (this *LabelPair) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelPair{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Sample) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sample{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, - `}`, - }, "") - return s -} -func (this *LabelMatchers) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," - } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&LabelMatchers{`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s -} -func (this *Metric) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Metric{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `}`, - }, "") - return s -} -func (this *LabelMatcher) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&LabelMatcher{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeriesFile) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TimeSeriesFile{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *TransferTSDBResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransferTSDBResponse{`, - `}`, - }, "") - return s -} -func valueToStringCortex(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *WriteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 
{ - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, PreallocTimeseries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - m.Source = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queries = append(m.Queries, &QueryRequest{}) - if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, &QueryResponse{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, TimeSeriesChunk{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy 
- } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire 
& 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.IngestionRate = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) - } - m.NumSeries = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumSeries |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ApiIngestionRate = float64(math.Float64frombits(v)) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.RuleIngestionRate = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*UserIDStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == nil { - m.Data = &UserStatsResponse{} - } - if err := 
m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, &UserIDStatsResponse{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return 
err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) - if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metric = append(m.Metric, &Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Chunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Chunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) - } - m.Encoding = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Encoding |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Samples = append(m.Samples, Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelPair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) - } - m.TimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatchers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, LabelAdapter{}) - if err := 
m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatcher) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= MatchType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - 
} - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeriesFile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesFile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesFile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filename = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransferTSDBResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransferTSDBResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransferTSDBResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 
0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCortex(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCortex - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthCortex - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortex - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipCortex(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthCortex - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCortex = fmt.Errorf("proto: integer overflow") -) diff --git a/integration-tests/proto/1.3.0/timeseries.go b/integration-tests/proto/1.3.0/timeseries.go deleted file mode 100644 index 76f56ceaf5..0000000000 --- a/integration-tests/proto/1.3.0/timeseries.go +++ /dev/null @@ -1,273 +0,0 @@ -package client - -import ( - "flag" - "fmt" - "io" - "strings" - "sync" - "unsafe" - - "github.com/prometheus/prometheus/pkg/labels" -) - -var ( - expectedTimeseries = 100 - expectedLabels = 20 - expectedSamplesPerSeries = 10 - - slicePool = sync.Pool{ - New: func() interface{} { - return make([]PreallocTimeseries, 0, expectedTimeseries) - }, - } - - timeSeriesPool = sync.Pool{ - New: func() interface{} { - return &TimeSeries{ - Labels: make([]LabelAdapter, 0, expectedLabels), - Samples: make([]Sample, 0, expectedSamplesPerSeries), - } - }, - } -) - -// PreallocConfig configures how structures will be preallocated to optimise -// proto unmarshalling. -type PreallocConfig struct{} - -// RegisterFlags registers configuration settings. -func (PreallocConfig) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, use for preallocations.") - f.IntVar(&expectedLabels, "ingester-client.expected-labels", expectedLabels, "Expected number of labels per timeseries, used for preallocations.") - f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.") -} - -// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal. -type PreallocWriteRequest struct { - WriteRequest -} - -// Unmarshal implements proto.Message. 
-func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error { - p.Timeseries = slicePool.Get().([]PreallocTimeseries) - return p.WriteRequest.Unmarshal(dAtA) -} - -// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal. -type PreallocTimeseries struct { - *TimeSeries -} - -// Unmarshal implements proto.Message. -func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { - p.TimeSeries = timeSeriesPool.Get().(*TimeSeries) - return p.TimeSeries.Unmarshal(dAtA) -} - -// LabelAdapter is a labels.Label that can be marshalled to/from protos. -type LabelAdapter labels.Label - -// Marshal implements proto.Marshaller. -func (bs *LabelAdapter) Marshal() ([]byte, error) { - size := bs.Size() - buf := make([]byte, size) - n, err := bs.MarshalToSizedBuffer(buf[:size]) - return buf[:n], err -} - -func (m *LabelAdapter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -// MarshalTo implements proto.Marshaller. -func (bs *LabelAdapter) MarshalToSizedBuffer(buf []byte) (n int, err error) { - ls := (*labels.Label)(bs) - i := len(buf) - if len(ls.Value) > 0 { - i -= len(ls.Value) - copy(buf[i:], ls.Value) - i = encodeVarintCortex(buf, i, uint64(len(ls.Value))) - i-- - buf[i] = 0x12 - } - if len(ls.Name) > 0 { - i -= len(ls.Name) - copy(buf[i:], ls.Name) - i = encodeVarintCortex(buf, i, uint64(len(ls.Name))) - i-- - buf[i] = 0xa - } - return len(buf) - i, nil -} - -// Unmarshal a LabelAdapater, implements proto.Unmarshaller. -// NB this is a copy of the autogenerated code to unmarshal a LabelPair, -// with the byte copying replaced with a yoloString. 
-func (bs *LabelAdapter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - bs.Name = yoloString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCortex - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCortex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - bs.Value = yoloString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipCortex(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func yoloString(buf []byte) string { - return *((*string)(unsafe.Pointer(&buf))) -} - -// Size implements proto.Sizer. -func (bs *LabelAdapter) Size() (n int) { - ls := (*labels.Label)(bs) - if bs == nil { - return 0 - } - var l int - _ = l - l = len(ls.Name) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - l = len(ls.Value) - if l > 0 { - n += 1 + l + sovCortex(uint64(l)) - } - return n -} - -// Equal implements proto.Equaler. -func (bs *LabelAdapter) Equal(other LabelAdapter) bool { - return bs.Name == other.Name && bs.Value == other.Value -} - -// Compare implements proto.Comparer. -func (bs *LabelAdapter) Compare(other LabelAdapter) int { - if c := strings.Compare(bs.Name, other.Name); c != 0 { - return c - } - return strings.Compare(bs.Value, other.Value) -} - -// ReuseSlice puts the slice back into a sync.Pool for reuse. -func ReuseSlice(slice []PreallocTimeseries) { - for i := range slice { - ReuseTimeseries(slice[i].TimeSeries) - } - slicePool.Put(slice[:0]) -} - -// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. 
-func ReuseTimeseries(ts *TimeSeries) { - ts.Labels = ts.Labels[:0] - ts.Samples = ts.Samples[:0] - timeSeriesPool.Put(ts) -} diff --git a/integration-tests/proto/marshal_test.go b/integration-tests/proto/marshal_test.go deleted file mode 100644 index fc5e3ec59d..0000000000 --- a/integration-tests/proto/marshal_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package main - -import ( - "testing" - - proto12 "github.com/cortexproject/cortex/integration-tests/proto/1.2.1" - proto13 "github.com/cortexproject/cortex/integration-tests/proto/1.3.0" - "github.com/stretchr/testify/require" -) - -func Test_CustomMarshal(t *testing.T) { - l13 := proto13.LabelAdapter{Name: "foo", Value: "bar"} - b3, err := l13.Marshal() - if err != nil { - t.Fatal(err) - } - l12 := proto12.LabelAdapter{} - err = l12.Unmarshal(b3) - if err != nil { - t.Fatal(err) - } - require.EqualValues(t, l13, l12) - - b2, err := l12.Marshal() - if err != nil { - t.Fatal(err) - } - err = l13.Unmarshal(b2) - if err != nil { - t.Fatal(err) - } - require.EqualValues(t, l13, l12) - - t13 := proto13.TimeSeries{ - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "1", Value: "2"}, - }, - Samples: []proto13.Sample{ - proto13.Sample{Value: 10, TimestampMs: 1}, - proto13.Sample{Value: 10000, TimestampMs: 2}, - }, - } - bt13, err := t13.Marshal() - if err != nil { - t.Fatal(err) - } - t12 := proto12.TimeSeries{} - err = t12.Unmarshal(bt13) - if err != nil { - t.Fatal(err) - } - require.EqualValues(t, t13, t12) - - bt12, err := t12.Marshal() - if err != nil { - t.Fatal(err) - } - t13 = proto13.TimeSeries{} - err = t13.Unmarshal(bt12) - if err != nil { - t.Fatal(err) - } - require.EqualValues(t, t13, t12) -} diff --git a/integration-tests/proto/proto_test.go b/integration-tests/proto/proto_test.go deleted file mode 100644 index 2b7aa4affe..0000000000 --- a/integration-tests/proto/proto_test.go +++ /dev/null @@ -1,437 +0,0 @@ -package main - -import ( - "context" - "log" - "net" - "sync" - "testing" - - proto12 
"github.com/cortexproject/cortex/integration-tests/proto/1.2.1" - proto13 "github.com/cortexproject/cortex/integration-tests/proto/1.3.0" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" -) - -const bufSize = 1024 * 1024 - -var ( - lis *bufconn.Listener - server *grpc.Server - labelNames = []string{"foo", "fuzz", "bar", "buzz"} - labelValues = []string{"lfoo", "lfuzz", "lbar", "lbuzz"} - wg = sync.WaitGroup{} -) - -func init() { - lis = bufconn.Listen(bufSize) - server = grpc.NewServer() -} - -func bufDialer(context.Context, string) (net.Conn, error) { - return lis.Dial() -} - -func Test_ProtoCompat(t *testing.T) { - ctx := context.Background() - conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) - if err != nil { - t.Fatalf("Failed to dial bufnet: %v", err) - } - defer conn.Close() - - // 1.3.0 server - proto13.RegisterIngesterServer(server, &fakeIngesterServer{t}) - // 1.2.1 client - client := proto12.NewIngesterClient(conn) - - go func() { - if err := server.Serve(lis); err != nil { - log.Fatalf("Server exited with error: %v", err) - } - }() - - l, err := client.LabelNames(ctx, &proto12.LabelNamesRequest{}) - if err != nil { - t.Fatal(err) - } - require.Equal(t, labelNames, l.LabelNames) - - v, err := client.LabelValues(ctx, &proto12.LabelValuesRequest{LabelName: "foo"}) - if err != nil { - t.Fatal(err) - } - require.Equal(t, labelValues, v.LabelValues) - - s, err := client.UserStats(ctx, &proto12.UserStatsRequest{}) - if err != nil { - t.Fatal(err) - } - require.Equal(t, &proto12.UserStatsResponse{ - IngestionRate: 10.5, - NumSeries: 1000000, - ApiIngestionRate: 20.245, - RuleIngestionRate: 39.033, - }, s) - - r, err := client.Push(ctx, &proto12.WriteRequest{ - Source: proto12.RULE, - Timeseries: []proto12.PreallocTimeseries{ - proto12.PreallocTimeseries{ - TimeSeries: &proto12.TimeSeries{ - Labels: []proto12.LabelAdapter{ - proto12.LabelAdapter{Name: 
"foo", Value: "bar"}, - proto12.LabelAdapter{Name: "fuzz", Value: "buzz"}, - }, - Samples: []proto12.Sample{ - proto12.Sample{Value: 10, TimestampMs: 1}, - proto12.Sample{Value: 10000, TimestampMs: 2}, - }, - }, - }, - proto12.PreallocTimeseries{ - TimeSeries: &proto12.TimeSeries{ - Labels: []proto12.LabelAdapter{ - proto12.LabelAdapter{Name: "foo", Value: "buzz"}, - proto12.LabelAdapter{Name: "fuzz", Value: "bar"}, - }, - Samples: []proto12.Sample{ - proto12.Sample{Value: 20.1234, TimestampMs: 1}, - proto12.Sample{Value: 25.1233, TimestampMs: 2}, - }, - }, - }, - }, - }) - if err != nil { - t.Fatal(err) - } - require.NotNil(t, r) - q, err := client.Query(ctx, &proto12.QueryRequest{ - StartTimestampMs: 1111, - EndTimestampMs: 20000, - Matchers: []*proto12.LabelMatcher{ - &proto12.LabelMatcher{Type: proto12.REGEX_MATCH, Name: "foo", Value: "bar"}, - &proto12.LabelMatcher{Type: proto12.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, - }, - }) - if err != nil { - t.Fatal(err) - } - require.Equal(t, &proto12.QueryResponse{ - Timeseries: []proto12.TimeSeries{ - proto12.TimeSeries{ - Labels: []proto12.LabelAdapter{ - proto12.LabelAdapter{Name: "foo", Value: "bar"}, - proto12.LabelAdapter{Name: "fuzz", Value: "buzz"}, - }, - Samples: []proto12.Sample{ - proto12.Sample{Value: 10, TimestampMs: 1}, - proto12.Sample{Value: 10000, TimestampMs: 2}, - }, - }, - }, - }, q) - - stream, err := client.QueryStream(ctx, &proto12.QueryRequest{ - StartTimestampMs: 1111, - EndTimestampMs: 20000, - Matchers: []*proto12.LabelMatcher{ - &proto12.LabelMatcher{Type: proto12.REGEX_MATCH, Name: "foo", Value: "bar"}, - &proto12.LabelMatcher{Type: proto12.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, - }, - }) - if err != nil { - t.Fatal(err) - } - streamResponse, err := stream.Recv() - if err != nil { - t.Fatal(err) - } - require.Equal(t, &proto12.QueryStreamResponse{ - Timeseries: []proto12.TimeSeriesChunk{ - proto12.TimeSeriesChunk{ - FromIngesterId: "1", - UserId: "2", - Labels: []proto12.LabelAdapter{ - 
proto12.LabelAdapter{Name: "foo", Value: "barr"}, - }, - Chunks: []proto12.Chunk{ - proto12.Chunk{ - StartTimestampMs: 1, - EndTimestampMs: 1000, - Encoding: 5, - Data: []byte{1, 1, 3, 5, 12}, - }, - }, - }, - }, - }, streamResponse) - stats, err := client.AllUserStats(ctx, &proto12.UserStatsRequest{}) - if err != nil { - t.Fatal(err) - } - require.Equal(t, &proto12.UsersStatsResponse{ - Stats: []*proto12.UserIDStatsResponse{ - &proto12.UserIDStatsResponse{ - UserId: "fake", - Data: &proto12.UserStatsResponse{ - IngestionRate: 1, - NumSeries: 2, - ApiIngestionRate: 2, - RuleIngestionRate: 1.223, - }, - }, - }, - }, stats) - - m, err := client.MetricsForLabelMatchers(ctx, &proto12.MetricsForLabelMatchersRequest{ - StartTimestampMs: 1, - EndTimestampMs: 2, - MatchersSet: []*proto12.LabelMatchers{ - &proto12.LabelMatchers{ - Matchers: []*proto12.LabelMatcher{ - &proto12.LabelMatcher{ - Type: proto12.REGEX_MATCH, - Name: "foo", - Value: "buzz", - }, - }, - }, - }, - }) - if err != nil { - t.Fatal(err) - } - require.Equal(t, &proto12.MetricsForLabelMatchersResponse{ - Metric: []*proto12.Metric{ - &proto12.Metric{Labels: []proto12.LabelAdapter{ - proto12.LabelAdapter{Name: "foo", Value: "bar"}, - }}, - }, - }, m) - - chunkStream, err := client.TransferChunks(ctx) - if err != nil { - t.Fatal(err) - } - wg.Add(1) - err = chunkStream.Send(&proto12.TimeSeriesChunk{ - FromIngesterId: "foo", - UserId: "fake", - Labels: []proto12.LabelAdapter{ - proto12.LabelAdapter{Name: "bar", Value: "buzz"}, - }, - Chunks: []proto12.Chunk{ - proto12.Chunk{ - StartTimestampMs: 1, - EndTimestampMs: 2, - Encoding: 3, - Data: []byte{1, 23, 24, 43, 4, 123}, - }, - }, - }) - if err != nil { - t.Fatal(err) - } - wg.Wait() - err = stream.CloseSend() - require.NoError(t, err) - tsStream, err := client.TransferTSDB(ctx) - require.NoError(t, err) - wg.Add(1) - err = tsStream.Send(&proto12.TimeSeriesFile{ - FromIngesterId: "string", - UserId: "fake", - Filename: "here", - Data: []byte{1, 2, 3, 5, 6, 7}, 
- }) - require.NoError(t, err) - wg.Wait() - require.NoError(t, tsStream.CloseSend()) -} - -type fakeIngesterServer struct { - t *testing.T -} - -func (f *fakeIngesterServer) LabelValues(_ context.Context, v *proto13.LabelValuesRequest) (*proto13.LabelValuesResponse, error) { - require.Equal(f.t, "foo", v.LabelName) - return &proto13.LabelValuesResponse{ - LabelValues: labelValues, - }, nil -} -func (fakeIngesterServer) LabelNames(context.Context, *proto13.LabelNamesRequest) (*proto13.LabelNamesResponse, error) { - return &proto13.LabelNamesResponse{ - LabelNames: labelNames, - }, nil -} -func (fakeIngesterServer) UserStats(context.Context, *proto13.UserStatsRequest) (*proto13.UserStatsResponse, error) { - return &proto13.UserStatsResponse{ - IngestionRate: 10.5, - NumSeries: 1000000, - ApiIngestionRate: 20.245, - RuleIngestionRate: 39.033, - }, nil -} -func (f *fakeIngesterServer) Push(_ context.Context, req *proto13.WriteRequest) (*proto13.WriteResponse, error) { - require.Equal(f.t, &proto13.WriteRequest{ - Source: proto13.RULE, - Timeseries: []proto13.PreallocTimeseries{ - proto13.PreallocTimeseries{ - TimeSeries: &proto13.TimeSeries{ - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "foo", Value: "bar"}, - proto13.LabelAdapter{Name: "fuzz", Value: "buzz"}, - }, - Samples: []proto13.Sample{ - proto13.Sample{Value: 10, TimestampMs: 1}, - proto13.Sample{Value: 10000, TimestampMs: 2}, - }, - }, - }, - proto13.PreallocTimeseries{ - TimeSeries: &proto13.TimeSeries{ - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "foo", Value: "buzz"}, - proto13.LabelAdapter{Name: "fuzz", Value: "bar"}, - }, - Samples: []proto13.Sample{ - proto13.Sample{Value: 20.1234, TimestampMs: 1}, - proto13.Sample{Value: 25.1233, TimestampMs: 2}, - }, - }, - }, - }, - }, req) - return &proto13.WriteResponse{}, nil -} -func (f *fakeIngesterServer) Query(_ context.Context, req *proto13.QueryRequest) (*proto13.QueryResponse, error) { - require.Equal(f.t, 
&proto13.QueryRequest{ - StartTimestampMs: 1111, - EndTimestampMs: 20000, - Matchers: []*proto13.LabelMatcher{ - &proto13.LabelMatcher{Type: proto13.REGEX_MATCH, Name: "foo", Value: "bar"}, - &proto13.LabelMatcher{Type: proto13.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, - }, - }, req) - return &proto13.QueryResponse{ - Timeseries: []proto13.TimeSeries{ - proto13.TimeSeries{ - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "foo", Value: "bar"}, - proto13.LabelAdapter{Name: "fuzz", Value: "buzz"}, - }, - Samples: []proto13.Sample{ - proto13.Sample{Value: 10, TimestampMs: 1}, - proto13.Sample{Value: 10000, TimestampMs: 2}, - }, - }, - }, - }, nil -} -func (f *fakeIngesterServer) QueryStream(req *proto13.QueryRequest, stream proto13.Ingester_QueryStreamServer) error { - require.Equal(f.t, &proto13.QueryRequest{ - StartTimestampMs: 1111, - EndTimestampMs: 20000, - Matchers: []*proto13.LabelMatcher{ - &proto13.LabelMatcher{Type: proto13.REGEX_MATCH, Name: "foo", Value: "bar"}, - &proto13.LabelMatcher{Type: proto13.NOT_EQUAL, Name: "fuzz", Value: "buzz"}, - }, - }, req) - stream.Send(&proto13.QueryStreamResponse{ - Timeseries: []proto13.TimeSeriesChunk{ - proto13.TimeSeriesChunk{ - FromIngesterId: "1", - UserId: "2", - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "foo", Value: "barr"}, - }, - Chunks: []proto13.Chunk{ - proto13.Chunk{ - StartTimestampMs: 1, - EndTimestampMs: 1000, - Encoding: 5, - Data: []byte{1, 1, 3, 5, 12}, - }, - }, - }, - }, - }) - return nil -} -func (fakeIngesterServer) AllUserStats(context.Context, *proto13.UserStatsRequest) (*proto13.UsersStatsResponse, error) { - return &proto13.UsersStatsResponse{ - Stats: []*proto13.UserIDStatsResponse{ - &proto13.UserIDStatsResponse{ - UserId: "fake", - Data: &proto13.UserStatsResponse{ - IngestionRate: 1, - NumSeries: 2, - ApiIngestionRate: 2, - RuleIngestionRate: 1.223, - }, - }, - }, - }, nil -} -func (f *fakeIngesterServer) MetricsForLabelMatchers(_ context.Context, req 
*proto13.MetricsForLabelMatchersRequest) (*proto13.MetricsForLabelMatchersResponse, error) { - require.Equal(f.t, &proto13.MetricsForLabelMatchersRequest{ - StartTimestampMs: 1, - EndTimestampMs: 2, - MatchersSet: []*proto13.LabelMatchers{ - &proto13.LabelMatchers{ - Matchers: []*proto13.LabelMatcher{ - &proto13.LabelMatcher{ - Type: proto13.REGEX_MATCH, - Name: "foo", - Value: "buzz", - }, - }, - }, - }, - }, req) - return &proto13.MetricsForLabelMatchersResponse{ - Metric: []*proto13.Metric{ - &proto13.Metric{Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "foo", Value: "bar"}, - }}, - }, - }, nil -} -func (f *fakeIngesterServer) TransferChunks(s proto13.Ingester_TransferChunksServer) error { - c, err := s.Recv() - require.NoError(f.t, err) - require.Equal(f.t, &proto13.TimeSeriesChunk{ - FromIngesterId: "foo", - UserId: "fake", - Labels: []proto13.LabelAdapter{ - proto13.LabelAdapter{Name: "bar", Value: "buzz"}, - }, - Chunks: []proto13.Chunk{ - proto13.Chunk{ - StartTimestampMs: 1, - EndTimestampMs: 2, - Encoding: 3, - Data: []byte{1, 23, 24, 43, 4, 123}, - }, - }, - }, c) - wg.Done() - return nil -} - -// TransferTSDB transfers all files of a tsdb to a joining ingester -func (f *fakeIngesterServer) TransferTSDB(stream proto13.Ingester_TransferTSDBServer) error { - file, err := stream.Recv() - require.NoError(f.t, err) - require.Equal(f.t, &proto13.TimeSeriesFile{ - FromIngesterId: "string", - UserId: "fake", - Filename: "here", - Data: []byte{1, 2, 3, 5, 6, 7}, - }, file) - wg.Done() - return nil -} diff --git a/pkg/ingester/client/timeseries.go b/pkg/ingester/client/timeseries.go index 445689c24a..0f8894b1a4 100644 --- a/pkg/ingester/client/timeseries.go +++ b/pkg/ingester/client/timeseries.go @@ -76,9 +76,9 @@ func (bs *LabelAdapter) Marshal() ([]byte, error) { return buf[:n], err } -func (m *LabelAdapter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (bs *LabelAdapter) MarshalTo(dAtA 
[]byte) (int, error) { + size := bs.Size() + return bs.MarshalToSizedBuffer(dAtA[:size]) } // MarshalTo implements proto.Marshaller. diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go deleted file mode 100644 index cb95b7fa1b..0000000000 --- a/vendor/github.com/google/btree/btree_mem.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// This binary compares memory usage between btree and gollrb. 
-package main - -import ( - "flag" - "fmt" - "math/rand" - "runtime" - "time" - - "github.com/google/btree" - "github.com/petar/GoLLRB/llrb" -) - -var ( - size = flag.Int("size", 1000000, "size of the tree to build") - degree = flag.Int("degree", 8, "degree of btree") - gollrb = flag.Bool("llrb", false, "use llrb instead of btree") -) - -func main() { - flag.Parse() - vals := rand.Perm(*size) - var t, v interface{} - v = vals - var stats runtime.MemStats - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- BEFORE ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - start := time.Now() - if *gollrb { - tr := llrb.New() - for _, v := range vals { - tr.ReplaceOrInsert(llrb.Int(v)) - } - t = tr // keep it around - } else { - tr := btree.New(*degree) - for _, v := range vals { - tr.ReplaceOrInsert(btree.Int(v)) - } - t = tr // keep it around - } - fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) - fmt.Println("-------- AFTER ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- AFTER GC ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - if t == v { - fmt.Println("to make sure vals and tree aren't GC'd") - } -} diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go deleted file mode 100644 index 7c634cdc5c..0000000000 --- a/vendor/github.com/lib/pq/oid/gen.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build ignore - -// Generate the table of OID values -// Run with 'go run gen.go'. -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - "os/exec" - "strings" - - _ "github.com/lib/pq" -) - -// OID represent a postgres Object Identifier Type. -type OID struct { - ID int - Type string -} - -// Name returns an upper case version of the oid type. 
-func (o OID) Name() string { - return strings.ToUpper(o.Type) -} - -func main() { - datname := os.Getenv("PGDATABASE") - sslmode := os.Getenv("PGSSLMODE") - - if datname == "" { - os.Setenv("PGDATABASE", "pqgotest") - } - - if sslmode == "" { - os.Setenv("PGSSLMODE", "disable") - } - - db, err := sql.Open("postgres", "") - if err != nil { - log.Fatal(err) - } - rows, err := db.Query(` - SELECT typname, oid - FROM pg_type WHERE oid < 10000 - ORDER BY oid; - `) - if err != nil { - log.Fatal(err) - } - oids := make([]*OID, 0) - for rows.Next() { - var oid OID - if err = rows.Scan(&oid.Type, &oid.ID); err != nil { - log.Fatal(err) - } - oids = append(oids, &oid) - } - if err = rows.Err(); err != nil { - log.Fatal(err) - } - cmd := exec.Command("gofmt") - cmd.Stderr = os.Stderr - w, err := cmd.StdinPipe() - if err != nil { - log.Fatal(err) - } - f, err := os.Create("types.go") - if err != nil { - log.Fatal(err) - } - cmd.Stdout = f - err = cmd.Start() - if err != nil { - log.Fatal(err) - } - fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.") - fmt.Fprintln(w, "\npackage oid") - fmt.Fprintln(w, "const (") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) - } - fmt.Fprintln(w, ")") - fmt.Fprintln(w, "var TypeName = map[Oid]string{") - for _, oid := range oids { - fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) - } - fmt.Fprintln(w, "}") - w.Close() - cmd.Wait() -} diff --git a/vendor/github.com/miekg/dns/duplicate_generate.go b/vendor/github.com/miekg/dns/duplicate_generate.go deleted file mode 100644 index 9b7a71b16e..0000000000 --- a/vendor/github.com/miekg/dns/duplicate_generate.go +++ /dev/null @@ -1,144 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. 
The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" -) - -var packageHdr = ` -// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. - -package dns - -` - -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - - if name == "PrivateRR" || name == "OPT" { - continue - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate the duplicate check for each type. 
- fmt.Fprint(b, "// isDuplicate() functions\n\n") - for _, name := range namedTypes { - - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name) - fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name) - fmt.Fprint(b, "if !ok { return false }\n") - fmt.Fprint(b, "_ = r2\n") - for i := 1; i < st.NumFields(); i++ { - field := st.Field(i).Name() - o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) } - o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) } - - // For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are - // *indirectly* defined as a slice in the net package). - if _, ok := st.Field(i).Type().(*types.Slice); ok { - o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}") - - if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` { - o3(`for i := 0; i < len(r1.%s); i++ { - if !isDuplicateName(r1.%s[i], r2.%s[i]) { - return false - } - }`) - - continue - } - - o3(`for i := 0; i < len(r1.%s); i++ { - if r1.%s[i] != r2.%s[i] { - return false - } - }`) - - continue - } - - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"a"`, `dns:"aaaa"`: - o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}") - case `dns:"cdomain-name"`, `dns:"domain-name"`: - o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}") - default: - o2("if r1.%s != r2.%s {\nreturn false\n}") - } - } - fmt.Fprintf(b, "return true\n}\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zduplicate.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 721a0fce32..0000000000 --- 
a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,328 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// Code generated by "go run msg_generate.go"; DO NOT EDIT. - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := 
&bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = 
packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return off, err - } -} -`, field, field) - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"any"`: - o("off, err = packStringAny(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name) - fmt.Fprint(b, `rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + 
int(rr.Hdr.Rdlength))\n") - case `dns:"any"`: - o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return off, nil - } -`) - } - } - fmt.Fprintf(b, "return off, nil }\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. -func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. 
-func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index 8cda2a74c5..0000000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,285 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, - "CSYNC": {}, -} - -var packageHdr = ` -// Code generated by "go run types_generate.go"; DO NOT EDIT. - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does 
not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n") - case `dns:"domain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n") - case `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, true)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, false)\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored - o("l += len(rr.%s)/2\n") - case st.Tag(i) 
== `dns:"hex"`: - o("l += len(rr.%s)/2\n") - case st.Tag(i) == `dns:"any"`: - o("l += len(rr.%s)\n") - case st.Tag(i) == `dns:"a"`: - o("if len(rr.%s) != 0 { l += net.IPv4len }\n") - case st.Tag(i) == `dns:"aaaa"`: - o("if len(rr.%s) != 0 { l += net.IPv6len }\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l++ // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"rr.Hdr"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - // For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element. 
- if t == "EDNS0" { - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/minio/minio-go/v6/functional_tests.go b/vendor/github.com/minio/minio-go/v6/functional_tests.go deleted file mode 100644 index fb66f06fe3..0000000000 --- a/vendor/github.com/minio/minio-go/v6/functional_tests.go +++ /dev/null @@ -1,10247 +0,0 @@ -// +build ignore - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "mime/multipart" - "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/dustin/go-humanize" - log "github.com/sirupsen/logrus" - - "github.com/minio/minio-go/v6" - "github.com/minio/minio-go/v6/pkg/encrypt" -) - -const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" -const ( - letterIdxBits = 6 // 6 bits to represent a letter index - letterIdxMask = 1<= 0; { - if remain == 0 { - cache, remain = src.Int63(), letterIdxMax - } - if idx := int(cache & letterIdxMask); idx < len(letterBytes) { - b[i] = letterBytes[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - return prefix + string(b[0:30-len(prefix)]) -} - -var dataFileMap = map[string]int{ - "datafile-1-b": 1, - "datafile-10-kB": 10 * humanize.KiByte, - "datafile-33-kB": 33 * humanize.KiByte, - "datafile-100-kB": 100 * humanize.KiByte, - "datafile-1.03-MB": 1056 * humanize.KiByte, - "datafile-1-MB": 1 * humanize.MiByte, - "datafile-5-MB": 5 * humanize.MiByte, - "datafile-6-MB": 6 * humanize.MiByte, - "datafile-11-MB": 11 * humanize.MiByte, - "datafile-129-MB": 129 * humanize.MiByte, -} - -func isFullMode() bool { - return os.Getenv("MINT_MODE") == "full" -} - -func getFuncName() string { - pc, _, _, _ := runtime.Caller(1) - return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") -} - -// Tests bucket re-create errors. 
-func testMakeBucketError() { - region := "eu-central-1" - - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket Failed", err) - return - } - if err = c.MakeBucket(bucketName, region); err == nil { - logError(testName, function, args, startTime, "", "Bucket already exists", err) - return - } - // Verify valid error response from server. 
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testMetadataSizeLimit() { - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts.UserMetadata": "", - } - rand.Seed(startTime.Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - const HeaderSizeLimit = 8 * 1024 - const UserMetadataLimit = 2 * 1024 - - // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail - metadata := make(map[string]string) - metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, 
minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) - return - } - - // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail - metadata = make(map[string]string) - metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegions() { - region := "eu-central-1" - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - region = "us-west-2" - args["region"] = region - if err = c.MakeBucket(bucketName+".withperiod", region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectReadAt() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "objectContentType", - } - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object content type - objectContentType := "binary/octet-stream" - args["objectContentType"] = objectContentType - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Get Object failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat Object failed", err) - return - } - if st.Size != 
int64(bufSize) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) - return - } - if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content types don't match", err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectWithMetadata() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object custom metadata - customContentType := "custom/contenttype" - - args["metadata"] = map[string][]string{ - "Content-Type": {customContentType}, - "X-Amz-Meta-CustomKey": {"extra spaces in value"}, - } - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ - ContentType: customContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", 
"ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testPutObjectWithContentLanguage() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - data := bytes.Repeat([]byte("a"), int(0)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en", - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != 0 { - logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err) - return - } - - objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objInfo.Metadata.Get("Content-Language") != "en" { - logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with streaming signature. -func testPutObjectStreaming() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload an object. - sizes := []int64{0, 64*1024 - 1, 64 * 1024} - - for _, size := range sizes { - data := bytes.Repeat([]byte("a"), int(size)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - - if n != size { - logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object seeker from the end, using whence set to '2'. -func testGetObjectSeekEnd() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - 
logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - pos, err := r.Seek(-100, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Object Seek failed", err) - return - } - if pos != st.Size-100 { - logError(testName, function, args, startTime, "", "Incorrect position", err) - return - } - buf2 := make([]byte, 100) - m, err := io.ReadFull(r, buf2) - if err != nil { - logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) - return - } - hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) - hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) - if hexBuf1 != hexBuf2 { - logError(testName, function, args, startTime, "", "Values at same index dont match", err) - return - } - pos, err = r.Seek(-100, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Object Seek failed", err) - return - } - if pos != st.Size-100 { - logError(testName, function, args, startTime, "", "Incorrect position", err) - return - } - if err = r.Close(); err != nil { - logError(testName, function, args, startTime, "", "ObjectClose failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. -func testGetObjectClosedTwice() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not 
match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Already closed object. No error returned", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test RemoveObjectsWithContext request context cancels after timeout -func testRemoveObjectsWithContext() { - // Initialize logging params. - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current tie. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - - // Generate put data. - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 20 objects. 
- nrObjects := 20 - objectsCh := make(chan string) - go func() { - defer close(objectsCh) - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- objectName - } - }() - // Set context to cancel in 1 nanosecond. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Call RemoveObjectsWithContext API with short timeout. - errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) - // Check for error. - select { - case r := <-errorCh: - if r.Err == nil { - logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err) - return - } - } - // Set context with longer timeout. - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - // Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed. - errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh) - select { - case r, more := <-errorCh: - if more || r.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - // Delete all objects and buckets. - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test removing multiple objects with Remove API -func testRemoveMultipleObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 1100 objects - nrObjects := 200 - - objectsCh := make(chan string) - - go func() { - defer close(objectsCh) - // Upload objects and send them to objectsCh - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- objectName - } - }() - - // Call RemoveObjects API - errorCh := c.RemoveObjects(bucketName, objectsCh) - - // Check if errorCh doesn't receive any error - select { - case r, more := <-errorCh: - if more { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject of a big file to trigger 
multipart -func testFPutObjectMultipart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var fileName = getMintDataDirFilePath("datafile-129-MB") - if fileName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
- if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - fileName = file.Name() - args["fileName"] = fileName - } - totalSize := dataFileMap["datafile-129-MB"] - // Set base object name - objectName := bucketName + "FPutObject" + "-standard" - args["objectName"] = objectName - - objectContentType := "testapplication/octet-stream" - args["objectContentType"] = objectContentType - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(totalSize) { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", err) - return - } - if objInfo.Size != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) - return - } - if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType doesn't match", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, 
function, args, startTime).Info()
}

// Tests FPutObject with null contentType (default = application/octet-stream)
func testFPutObject() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "FPutObject(bucketName, objectName, fileName, opts)"

	args := map[string]interface{}{
		"bucketName": "",
		"objectName": "",
		"fileName":   "",
		"opts":       "",
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	location := "us-east-1"

	// Make a new bucket.
	args["bucketName"] = bucketName
	args["location"] = location
	function = "MakeBucket()bucketName, location"
	err = c.MakeBucket(bucketName, location)
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
	// Use different data in part for multipart tests to check parts are uploaded in correct order.
	var fName = getMintDataDirFilePath("datafile-129-MB")
	if fName == "" {
		// Make a temp file with minPartSize bytes of data.
		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
		if err != nil {
			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
			return
		}

		// Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
		if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
			logError(testName, function, args, startTime, "", "File copy failed", err)
			return
		}
		// Close the file pro-actively for windows.
		if err = file.Close(); err != nil {
			logError(testName, function, args, startTime, "", "File close failed", err)
			return
		}
		defer os.Remove(file.Name())
		fName = file.Name()
	}
	totalSize := dataFileMap["datafile-129-MB"]

	// Set base object name
	function = "FPutObject(bucketName, objectName, fileName, opts)"
	objectName := bucketName + "FPutObject"
	args["objectName"] = objectName + "-standard"
	args["fileName"] = fName
	args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}

	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
	n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "FPutObject failed", err)
		return
	}
	if n != int64(totalSize) {
		// NOTE: string(intValue) converts to a rune, not a decimal string; use fmt.Sprintf.
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
		return
	}

	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
	args["objectName"] = objectName + "-Octet"
	n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
	if err != nil {
		// Fixed copy-paste message: this is an FPutObject error, not a file-close error.
		logError(testName, function, args, startTime, "", "FPutObject failed", err)
		return
	}
	if n != int64(totalSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
		return
	}
	srcFile, err := os.Open(fName)
	if err != nil {
		logError(testName, function, args, startTime, "", "File open failed", err)
		return
	}
	defer srcFile.Close()
	// Add extension to temp file name
	tmpFile, err := os.Create(fName + ".gtar")
	if err != nil {
		logError(testName, function, args, startTime, "", "File create failed", err)
		return
	}
	defer tmpFile.Close()
	_, err = io.Copy(tmpFile, srcFile)
	if err != nil {
		logError(testName, function, args, startTime, "", "File copy failed", err)
		return
	}

	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
	args["objectName"] = objectName + "-GTar"
	args["opts"] = minio.PutObjectOptions{}
	n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "FPutObject failed", err)
		return
	}
	if n != int64(totalSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
		return
	}

	// Check headers
	function = "StatObject(bucketName, objectName, opts)"
	args["objectName"] = objectName + "-standard"
	rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}
	if rStandard.ContentType != "application/octet-stream" {
		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
		return
	}

	function = "StatObject(bucketName, objectName, opts)"
	args["objectName"] = objectName + "-Octet"
	rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}
	if rOctet.ContentType != "application/octet-stream" {
		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
		return
	}

	function = "StatObject(bucketName, objectName, opts)"
	args["objectName"] = objectName + "-GTar"
	rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}
	if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" {
		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	if err = os.Remove(fName + ".gtar"); err != nil {
		logError(testName, function, args, startTime, "", "File remove failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}

// Tests FPutObjectWithContext request context cancels after timeout
func testFPutObjectWithContext() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "FPutObject(bucketName, objectName, fileName, opts)"
	args := map[string]interface{}{
		"bucketName": "",
		"objectName": "",
		"fileName":   "",
		"opts":       "",
	}
	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Upload 1 parts worth of data to use multipart upload.
	// Use different data in part for multipart tests to check parts are uploaded in correct order.
	var fName = getMintDataDirFilePath("datafile-1-MB")
	if fName == "" {
		// Make a temp file with 1 MiB bytes of data.
		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
		if err != nil {
			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
			return
		}

		// Upload 1 parts to trigger multipart upload
		if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
			logError(testName, function, args, startTime, "", "File copy failed", err)
			return
		}
		// Close the file pro-actively for windows.
		if err = file.Close(); err != nil {
			logError(testName, function, args, startTime, "", "File close failed", err)
			return
		}
		defer os.Remove(file.Name())
		fName = file.Name()
	}
	totalSize := dataFileMap["datafile-1-MB"]

	// Set base object name
	objectName := bucketName + "FPutObjectWithContext"
	args["objectName"] = objectName
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	args["ctx"] = ctx
	defer cancel()

	// Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
	_, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err == nil {
		logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
		return
	}
	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
	defer cancel()
	// Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
	n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err)
		return
	}
	if n != int64(totalSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
		return
	}

	_, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

}

// Tests FPutObjectWithContext request context cancels after timeout
func testFPutObjectWithContextV2() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
	args := map[string]interface{}{
		"bucketName": "",
		"objectName": "",
		"opts":       "minio.PutObjectOptions{ContentType:objectContentType}",
	}
	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.NewV2(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Upload 1 parts worth of data to use multipart upload.
	// Use different data in part for multipart tests to check parts are uploaded in correct order.
	var fName = getMintDataDirFilePath("datafile-1-MB")
	if fName == "" {
		// Make a temp file with 1 MiB bytes of data.
		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
		if err != nil {
			logError(testName, function, args, startTime, "", "Temp file creation failed", err)
			return
		}

		// Upload 1 parts to trigger multipart upload
		if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
			logError(testName, function, args, startTime, "", "File copy failed", err)
			return
		}

		// Close the file pro-actively for windows.
		if err = file.Close(); err != nil {
			logError(testName, function, args, startTime, "", "File close failed", err)
			return
		}
		defer os.Remove(file.Name())
		fName = file.Name()
	}
	totalSize := dataFileMap["datafile-1-MB"]

	// Set base object name
	objectName := bucketName + "FPutObjectWithContext"
	args["objectName"] = objectName

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	args["ctx"] = ctx
	defer cancel()

	// Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
	_, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err == nil {
		logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
		return
	}
	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
	defer cancel()
	// Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
	n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err)
		return
	}
	if n != int64(totalSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, wanted %d got %d", totalSize, n), err)
		return
	}

	_, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "StatObject failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

}

// Test validates putObject with context to see if request cancellation is honored.
func testPutObjectWithContext() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
	args := map[string]interface{}{
		"ctx":        "",
		"bucketName": "",
		"objectName": "",
		"opts":       "",
	}
	// Instantiate new minio client object.
	c, err := minio.NewV4(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Make a new bucket.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
		return
	}
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()
	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
	args["objectName"] = objectName

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
	args["ctx"] = ctx
	args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
	defer cancel()

	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err == nil {
		logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err)
		return
	}

	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
	args["ctx"] = ctx

	defer cancel()
	reader = getDataReader("datafile-33-kB")
	defer reader.Close()
	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()

}

// Tests get object ReaderSeeker interface methods.
func testGetObjectReadSeekFunctional() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "GetObject(bucketName, objectName)"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	defer func() {
		// Delete all objects and buckets
		if err = cleanupBucket(bucketName, c); err != nil {
			logError(testName, function, args, startTime, "", "Cleanup failed", err)
			return
		}
	}()

	// Generate 33K of data.
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAll failed", err)
		return
	}

	// Save the data
	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	if n != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, n), err)
		return
	}

	// Read the data back
	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}

	st, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat object failed", err)
		return
	}

	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
		return
	}

	// This following function helps us to compare data from the reader after seek
	// with the data from the original buffer
	cmpData := func(r io.Reader, start, end int) {
		if end-start == 0 {
			return
		}
		buffer := bytes.NewBuffer([]byte{})
		if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
			if err != io.EOF {
				logError(testName, function, args, startTime, "", "CopyN failed", err)
				return
			}
		}
		if !bytes.Equal(buf[start:end], buffer.Bytes()) {
			logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
			return
		}
	}

	// Generic seek error for errors other than io.EOF
	seekErr := errors.New("seek error")

	testCases := []struct {
		offset    int64
		whence    int
		pos       int64
		err       error
		shouldCmp bool
		start     int
		end       int
	}{
		// Start from offset 0, fetch data and compare
		{0, 0, 0, nil, true, 0, 0},
		// Start from offset 2048, fetch data and compare
		{2048, 0, 2048, nil, true, 2048, bufSize},
		// Start from offset larger than possible
		{int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
		// Move to offset 0 without comparing
		{0, 0, 0, nil, false, 0, 0},
		// Move one step forward and compare
		{1, 1, 1, nil, true, 1, bufSize},
		// Move larger than possible
		{int64(bufSize), 1, 0, seekErr, false, 0, 0},
		// Provide negative offset with CUR_SEEK
		{int64(-1), 1, 0, seekErr, false, 0, 0},
		// Test with whence SEEK_END and with positive offset
		{1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
		// Test with whence SEEK_END and with negative offset
		{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
		// Test with whence SEEK_END and with large negative offset
		{-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
	}

	for i, testCase := range testCases {
		// Perform seek operation
		n, err := r.Seek(testCase.offset, testCase.whence)
		// We expect an error
		// NOTE: use %v formatting — the previous code called err.Error() on a
		// possibly-nil error, which would panic instead of logging the failure.
		if testCase.err == seekErr && err == nil {
			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
			return
		}
		// We expect a specific error
		if testCase.err != seekErr && testCase.err != err {
			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
			return
		}
		// If we expect an error go to the next loop
		if testCase.err != nil {
			continue
		}
		// Check the returned seek pos
		if n != testCase.pos {
			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
			return
		}
		// Compare only if shouldCmp is activated
		if testCase.shouldCmp {
			cmpData(r, testCase.start, testCase.end)
		}
	}
	successLogger(testName, function, args, startTime).Info()
}

// Tests get object ReaderAt interface methods.
func testGetObjectReadAtFunctional() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "GetObject(bucketName, objectName)"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Generate 33K of data.
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAll failed", err)
		return
	}

	// Save the data
	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	if n != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, n), err)
		return
	}

	// read the data back
	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		// Fixed copy-paste message: this is a GetObject error.
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}
	offset := int64(2048)

	// read directly
	buf1 := make([]byte, 512)
	buf2 := make([]byte, 512)
	buf3 := make([]byte, 512)
	buf4 := make([]byte, 512)

	// Test readAt before stat is called such that objectInfo doesn't change.
	m, err := r.ReadAt(buf1, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf1) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
		return
	}
	if !bytes.Equal(buf1, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}
	offset += 512

	st, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}

	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
		return
	}

	m, err = r.ReadAt(buf2, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf2) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
		return
	}
	if !bytes.Equal(buf2, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}

	offset += 512
	m, err = r.ReadAt(buf3, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf3) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
		return
	}
	if !bytes.Equal(buf3, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}
	offset += 512
	m, err = r.ReadAt(buf4, offset)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf4) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
		return
	}
	if !bytes.Equal(buf4, buf[offset:offset+512]) {
		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
		return
	}

	buf5 := make([]byte, n)
	// Read the whole object.
	m, err = r.ReadAt(buf5, 0)
	if err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "ReadAt failed", err)
			return
		}
	}
	if m != len(buf5) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf5), m), err)
		return
	}
	if !bytes.Equal(buf, buf5) {
		logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
		return
	}

	buf6 := make([]byte, n+1)
	// Read the whole object and beyond.
	_, err = r.ReadAt(buf6, 0)
	if err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "ReadAt failed", err)
			return
		}
	}
	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}
	successLogger(testName, function, args, startTime).Info()
}

// Reproduces issue https://github.com/minio/minio-go/issues/1137
func testGetObjectReadAtWhenEOFWasReached() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "GetObject(bucketName, objectName)"
	args := map[string]interface{}{}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Generate 33K of data.
	bufSize := dataFileMap["datafile-33-kB"]
	var reader = getDataReader("datafile-33-kB")
	defer reader.Close()

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	buf, err := ioutil.ReadAll(reader)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAll failed", err)
		return
	}

	// Save the data
	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		logError(testName, function, args, startTime, "", "PutObject failed", err)
		return
	}

	if n != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, n), err)
		return
	}

	// read the data back
	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
	if err != nil {
		// Fixed copy-paste message: this is a GetObject error.
		logError(testName, function, args, startTime, "", "GetObject failed", err)
		return
	}

	// read directly
	buf1 := make([]byte, n)
	buf2 := make([]byte, 512)

	m, err := r.Read(buf1)
	if err != nil {
		if err != io.EOF {
			logError(testName, function, args, startTime, "", "Read failed", err)
			return
		}
	}
	if m != len(buf1) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Read read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
		return
	}
	if !bytes.Equal(buf1, buf) {
		logError(testName, function, args, startTime, "", "Incorrect count of Read data", err)
		return
	}

	st, err := r.Stat()
	if err != nil {
		logError(testName, function, args, startTime, "", "Stat failed", err)
		return
	}

	if st.Size != int64(bufSize) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
		return
	}

	m, err = r.ReadAt(buf2, 512)
	if err != nil {
		logError(testName, function, args, startTime, "", "ReadAt failed", err)
		return
	}
	if m != len(buf2) {
		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
		return
	}
	if !bytes.Equal(buf2, buf[512:1024]) {
		logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}

// Test Presigned Post Policy
func testPresignedPostPolicy() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "PresignedPostPolicy(policy)"
	args := map[string]interface{}{
		"policy": "",
	}

	// Seed random based on current time.
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - // Azure requires the key to not start with a number - metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") - metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - policy := minio.NewPostPolicy() - - if err := policy.SetBucket(""); err == nil { - logError(testName, function, args, startTime, 
"", "SetBucket did not fail for invalid conditions", err) - return - } - if err := policy.SetKey(""); err == nil { - logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) - return - } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - } - if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) - return - } - if err := policy.SetContentType(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) - return - } - if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { - logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) - return - } - if err := policy.SetUserMetadata("", ""); err == nil { - logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) - return - } - - policy.SetBucket(bucketName) - policy.SetKey(objectName) - policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days - policy.SetContentType("binary/octet-stream") - policy.SetContentLengthRange(10, 1024*1024) - policy.SetUserMetadata(metadataKey, metadataValue) - args["policy"] = policy.String() - - presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) - return - } - - var formBuf bytes.Buffer - writer := multipart.NewWriter(&formBuf) - for k, v := range formData { - writer.WriteField(k, v) - } - - // Get a 33KB file to upload and test if set post policy works - var filePath = getMintDataDirFilePath("datafile-33-kB") - if filePath == "" { - // Make a temp 
file with 33 KB data. - file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - filePath = file.Name() - } - - // add file to post request - f, err := os.Open(filePath) - defer f.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - w, err := writer.CreateFormFile("file", filePath) - if err != nil { - logError(testName, function, args, startTime, "", "CreateFormFile failed", err) - return - } - - _, err = io.Copy(w, f) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - writer.Close() - - // make post request with correct form data - res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes())) - if err != nil { - logError(testName, function, args, startTime, "", "Http request failed", err) - return - } - defer res.Body.Close() - if res.StatusCode != http.StatusNoContent { - logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) - return - } - - // expected path should be absolute path of the object - var scheme string - if mustParseBool(os.Getenv(enableHTTPS)) { - scheme = "https://" - } else { - scheme = "http://" - } - - expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName - expectedLocationBucketDNS := scheme + bucketName + "." 
+ os.Getenv(serverEndpoint) + "/" + objectName - - if val, ok := res.Header["Location"]; ok { - if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { - logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) - return - } - } else { - logError(testName, function, args, startTime, "", "Location not found in header response", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(dst, src)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(bucketName+"-copy", "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - // Copy Source - src := minio.NewSourceInfo(bucketName, objectName, nil) - args["src"] = src - - // Set copy conditions. - - // All invalid conditions first. 
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagExceptCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) - return - } - - err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err) - return - } - err = src.SetMatchETagCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err) - return - } - - dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) - args["dst"] = dst - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - // Perform the Copy - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", 
"GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) - return - } - - // Close all the get readers before proceeding with CopyObject operations. - r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.NewSourceInfo(bucketName, objectName, nil) - err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) - return - } - err = src.SetMatchETagExceptCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err) - return - } - - // Perform the Copy which should fail - err = c.CopyObject(dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - // Perform the Copy which should update only metadata. 
- src = minio.NewSourceInfo(bucketName, objectName, nil) - dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{ - "Copy": "should be same", - }) - args["dst"] = dst - args["src"] = src - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) - return - } - - oi, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - stOpts := minio.StatObjectOptions{} - stOpts.SetMatchETag(oi.ETag) - objInfo, err = c.StatObject(bucketName, objectName, stOpts) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) - return - } - - if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { - logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - if err = cleanupBucket(bucketName+"-copy", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderSeeker interface methods. -func testSSECEncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - }() - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // Read the data back - r, err := 
c.GetObject(bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with 
whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderSeeker interface methods. 
-func testSSES3EncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - }() - - // Generate 129MiB of data. 
- bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s 
original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. 
- logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderAt interface methods. -func testSSECEncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. 
- m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", 
"ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderAt interface methods. -func testSSES3EncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) - return - } - - // read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) 
- buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. - m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - 
return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// testSSECEncryptionPutGet tests encryption with customer provided encryption keys -func testSSECEncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data 
back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestEncryptionFPut tests encryption with customer specified encryption keys -func testSSECEncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - 
successLogger(testName, function, args, startTime).Info() -} - -// testSSES3EncryptionPutGet tests SSE-S3 encryption -func testSSES3EncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back without any encryption headers - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, 
recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestSSES3EncryptionFPut tests server side encryption -func testSSES3EncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, 
function, args, startTime).Info() -} - -func testBucketNotification() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "SetBucketNotification(bucketName)" - args := map[string]interface{}{ - "bucketName": "", - } - - if os.Getenv("NOTIFY_BUCKET") == "" || - os.Getenv("NOTIFY_SERVICE") == "" || - os.Getenv("NOTIFY_REGION") == "" || - os.Getenv("NOTIFY_ACCOUNTID") == "" || - os.Getenv("NOTIFY_RESOURCE") == "" { - ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := os.Getenv("NOTIFY_BUCKET") - args["bucketName"] = bucketName - - topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) - queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") - - topicConfig := minio.NewNotificationConfig(topicArn) - - topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) - topicConfig.AddFilterSuffix("jpg") - - queueConfig := minio.NewNotificationConfig(queueArn) - queueConfig.AddEvents(minio.ObjectCreatedAll) - queueConfig.AddFilterPrefix("photos/") - - bNotification := minio.BucketNotification{} - bNotification.AddTopic(topicConfig) - - // Add the same topicConfig again, should have no effect - // because it is duplicated - bNotification.AddTopic(topicConfig) - if len(bNotification.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Duplicate entry added", err) - return - } - - // Add and remove a queue config - bNotification.AddQueue(queueConfig) - bNotification.RemoveQueueByArn(queueArn) - - err = c.SetBucketNotification(bucketName, bNotification) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) - return - } - - bNotification, err = c.GetBucketNotification(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) - return - } - - if len(bNotification.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Topic config is empty", err) - return - } - - if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { - logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) - return - } - - err = c.RemoveAllBucketNotification(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", 
"RemoveAllBucketNotification failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests comprehensive list of all methods. -func testFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctional()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket. - function = "MakeBucket(bucketName, region)" - functionAll = "MakeBucket(bucketName, region)" - args["bucketName"] = bucketName - err = c.MakeBucket(bucketName, "us-east-1") - - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate a random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File creation failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "File write failed", err) - return - } - } - file.Close() - - // Verify if bucket exits and you have access. 
- var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find the bucket", err) - return - } - - // Verify if bucket exits and you have access with context. - function = "BucketExistsWithContext(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExistsWithContext(context.Background(), bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "BucketExistsWithContext failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find the bucket", err) - return - } - - // Asserting the default bucket policy. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - nilPolicy, err := c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - if nilPolicy != "" { - logError(testName, function, args, startTime, "", "policy should be set to nil", err) - return - } - - // Set the bucket policy to 'public readonly'. 
- function = "SetBucketPolicy(bucketName, readOnlyPolicy)" - functionAll += ", " + function - - readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readOnlyPolicy, - } - - err = c.SetBucketPolicy(bucketName, readOnlyPolicy) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readonly`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public writeonly'. - function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" - functionAll += ", " + function - - writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": writeOnlyPolicy, - } - err = c.SetBucketPolicy(bucketName, writeOnlyPolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `writeonly`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - - _, err = c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public read/write'. 
- function = "SetBucketPolicy(bucketName, readWritePolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readwrite`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // List all buckets. - function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets() - - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // List all buckets with context. - function = "ListBucketsWithContext()" - functionAll += ", " + function - args = nil - buckets, err = c.ListBucketsWithContext(context.Background()) - - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBucketsWithContext failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. 
- if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("f"), 1<<19) - - function = "PutObject(bucketName, objectName, reader, contentType)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) - return - } - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-nolength", - "contentType": "binary/octet-stream", - } - - n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) - return - } - - // Instantiate a done channel to close all listing. - doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. 
- - function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - objFound = false - isRecursive = true // Recursive is true. - function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - incompObjNotFound := true - - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) 
- return - } - - newReadBytes, err := ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) - return - } - newReader.Close() - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject failed", err) - return - } - - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) - return - } - - // Generate presigned HEAD object url. - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - // Verify if presigned url works. 
- resp, err := http.Head(presignedHeadURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - resp.Body.Close() - - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedGetObject success", err) - return - } - - // Generate presigned GET object url. - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. 
- resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - "reqParams": reqParams, - } - presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - // Verify if presigned url works. 
- resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) - return - } - if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedPutObject success", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - buf = bytes.Repeat([]byte("g"), 1<<19) - - req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, 
startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) - return - } - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: http.DefaultTransport, - } - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) - return - } - - newReadBytes, err = ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "RemoveObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - err = c.RemoveObject(bucketName, objectName) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - args["objectName"] = objectName + "-f" - err = c.RemoveObject(bucketName, objectName+"-f") - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-nolength" - err = c.RemoveObject(bucketName, objectName+"-nolength") - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-presigned" - err = c.RemoveObject(bucketName, objectName+"-presigned") - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - 
return - } - - function = "RemoveBucket(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - err = c.RemoveBucket(bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - err = c.RemoveBucket(bucketName) - if err == nil { - logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) - return - } - if err.Error() != "The specified bucket does not exist" { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File Remove failed", err) - return - } - if err = os.Remove(fileName + "-f"); err != nil { - logError(testName, function, args, startTime, "", "File Remove failed", err) - return - } - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test for validating GetObject Reader* methods functioning when the -// object is modified in the object store. -func testGetObjectModified() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer c.RemoveBucket(bucketName) - - // Upload an object. - objectName := "myobject" - args["objectName"] = objectName - content := "helloworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - defer c.RemoveObject(bucketName, objectName) - - reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) - return - } - defer reader.Close() - - // Read a few bytes of the object. - b := make([]byte, 5) - n, err := reader.ReadAt(b, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) - return - } - - // Upload different contents to the same object while object is being read. - newContent := "goodbyeworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - // Confirm that a Stat() call in between doesn't change the Object's cached etag. 
- _, err = reader.Stat() - expectedError := "At least one of the pre-conditions you specified did not hold" - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - - // Read again only to find object contents have been modified since last read. - _, err = reader.ReadAt(b, int64(n)) - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject to upload a file seeked at a given offset. -func testPutObjectUploadSeekedObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, fileToUpload, contentType)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileToUpload": "", - "contentType": "binary/octet-stream", - } - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer c.RemoveBucket(bucketName) - - var tempfile *os.File - - if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { - tempfile, err = os.Open(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - args["fileToUpload"] = fileName - } else { - tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile create failed", err) - return - } - args["fileToUpload"] = tempfile.Name() - - // Generate 100kB data - if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - defer os.Remove(tempfile.Name()) - - // Seek back to the beginning of the file. 
- tempfile.Seek(0, 0) - } - var length = 100 * humanize.KiByte - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - offset := length / 2 - if _, err = tempfile.Seek(int64(offset), 0); err != nil { - logError(testName, function, args, startTime, "", "TempFile seek failed", err) - return - } - - n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if n != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err) - return - } - tempfile.Close() - - obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer obj.Close() - - n, err = obj.Seek(int64(offset), 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != int64(offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) - return - } - - n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if n != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, 
startTime).Info() -} - -// Tests bucket re-create errors. -func testMakeBucketErrorV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - region := "eu-west-1" - args["bucketName"] = bucketName - args["region"] = region - - // Make a new bucket in 'eu-west-1'. - if err = c.MakeBucket(bucketName, region); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - if err = c.MakeBucket(bucketName, region); err == nil { - logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) - return - } - // Verify valid error response from server. 
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. -func testGetObjectClosedTwiceV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject hidden contentType setting -func testFPutObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - 
"objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Make a temp file with 11*1024*1024 bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) - n, err := io.CopyN(file, r, 11*1024*1024) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Close the file pro-actively for windows. 
- err = file.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - - // Set base object name - objectName := bucketName + "FPutObject" - args["objectName"] = objectName - args["fileName"] = file.Name() - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - - n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Add extension to temp file name - fileName := file.Name() - err = os.Rename(file.Name(), fileName+".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "Rename failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - args["fileName"] = fileName + ".gtar" - - n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - 
if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Check headers - rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) - return - } - - rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) - return - } - - rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) - return - } - - rGTar, err = c.StatObjectWithContext(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got 
"+rGTar.ContentType, err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - err = os.Remove(fileName + ".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegionsV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - // Make a new bucket with '.' 
in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil { - args["bucketName"] = bucketName + ".withperiod" - args["region"] = "us-west-2" - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderSeeker interface methods. -func testGetObjectReadSeekFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data. - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - n, err = r.Seek(offset, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - n, err = r.Seek(0, 1) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got 
"+string(n), err) - return - } - _, err = r.Seek(offset, 2) - if err == nil { - logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) - return - } - n, err = r.Seek(-offset, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != st.Size-offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err) - return - } - - var buffer1 bytes.Buffer - if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - // Seek again and read again. - n, err = r.Seek(offset-1, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != (offset - 1) { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err) - return - } - - var buffer2 bytes.Buffer - if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - // Verify now lesser bytes. - if !bytes.Equal(buf[2047:], buffer2.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. 
-func testGetObjectReadAtFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) - return - } - - // Read the data back - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - - // Read directly - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - m, err := r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } 
- offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, n) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, n+1) - // Read the whole object and beyond. 
- _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(bucketName+"-copy", "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) - return - } - - r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - r.Close() - - // Copy Source - src := minio.NewSourceInfo(bucketName, objectName, nil) - args["source"] = src - - // Set copy conditions. - - // All invalid conditions first. 
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) - if err == nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) - return - } - err = src.SetMatchETagExceptCond("") - if err == nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) - return - } - - err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err) - return - } - err = src.SetMatchETagCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err) - return - } - - dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) - args["destination"] = dst - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - // Perform the Copy - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", 
"GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err) - return - } - - // Close all the readers. - r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.NewSourceInfo(bucketName, objectName, nil) - err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) - if err != nil { - logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) - return - } - err = src.SetMatchETagExceptCond(objInfo.ETag) - if err != nil { - logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err) - return - } - - // Perform the Copy which should fail - err = c.CopyObject(dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - if err = cleanupBucket(bucketName+"-copy", c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testComposeObjectErrorCasesWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // 
Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Test that more than 10K source objects cannot be - // concatenated. - srcArr := [10001]minio.SourceInfo{} - srcSlice := srcArr[:] - dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - args["destination"] = dst - // Just explain about srcArr in args["sourceList"] - // to stop having 10,001 null headers logged - args["sourceList"] = "source array of 10,001 elements" - if err := c.ComposeObject(dst, srcSlice); err == nil { - logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) - return - } else if err.Error() != "There must be as least one and up to 10000 source objects." { - logError(testName, function, args, startTime, "", "Got unexpected error", err) - return - } - - // Create a source with invalid offset spec and check that - // error is returned: - // 1. Create the source object. - const badSrcSize = 5 * 1024 * 1024 - buf := bytes.Repeat([]byte("1"), badSrcSize) - _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - // 2. Set invalid range spec on the object (going beyond - // object size) - badSrc := minio.NewSourceInfo(bucketName, "badObject", nil) - err = badSrc.SetRange(1, badSrcSize) - if err != nil { - logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err) - return - } - // 3. ComposeObject call should fail. 
- if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil { - logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) - return - } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { - logError(testName, function, args, startTime, "", "Got invalid error", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCasesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -func testComposeMultipleSources(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{ - "destination": "", - "sourceList": "", - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). 
- err := c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Upload a small source object - const srcSize = 1024 * 1024 * 5 - buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // We will append 10 copies of the object. - srcs := []minio.SourceInfo{} - for i := 0; i < 10; i++ { - srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) - } - // make the last part very small - err = srcs[9].SetRange(0, 0) - if err != nil { - logError(testName, function, args, startTime, "", "SetRange failed", err) - return - } - args["sourceList"] = srcs - - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) - args["destination"] = dst - - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - err = c.ComposeObject(dst, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objProps.Size != 9*srcSize+1 { - logError(testName, function, args, startTime, "", "Size mismatched! 
Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test concatenating multiple objects objects -func testCompose10KSourcesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeMultipleSources(c) -} - -func testEncryptedEmptyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) - - // 1. 
create an sse-c encrypted object to copy by uploading - const srcSize = 0 - var buf []byte // Empty buffer - args["objectName"] = "object" - _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - // 2. Test CopyObject for an empty object - dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil) - if err != nil { - args["objectName"] = "new-object" - function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)" - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - srcInfo := minio.NewSourceInfo(bucketName, "object", sse) - if err = c.CopyObject(dstInfo, srcInfo); err != nil { - function = "CopyObject(dstInfo, srcInfo)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) - return - } - - // 3. Test Key rotation - newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) - dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil) - if err != nil { - args["objectName"] = "new-object" - function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)" - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse) - if err = c.CopyObject(dstInfo, srcInfo); err != nil { - function = "CopyObject(dstInfo, srcInfo)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) - return - } - - // 4. Download the object. 
- reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) - return - } - // Delete all objects and buckets - delete(args, "objectName") - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - var srcEncryption, dstEncryption encrypt.ServerSide - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // 1. create an sse-c encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ServerSideEncryption: sseSrc, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - if sseSrc != nil && sseSrc.Type() != encrypt.S3 { - srcEncryption = sseSrc - } - - // 2. 
copy object and change encryption key - src := minio.NewSourceInfo(bucketName, "srcObject", srcEncryption) - args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - if sseDst != nil && sseDst.Type() != encrypt.S3 { - dstEncryption = sseDst - } - // 3. get copied object and check if content is equal - coreClient := minio.Core{c} - reader, _, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - - // Test key rotation for source object in-place. 
- var newSSE encrypt.ServerSide - if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { - newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key - } - if sseSrc != nil && sseSrc.Type() == encrypt.S3 { - newSSE = encrypt.NewSSE() - } - if newSSE != nil { - dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Get copied object and check if content is equal - reader, _, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err = ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - // Test in-place decryption. 
- dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - src = minio.NewSourceInfo(bucketName, "srcObject", newSSE) - args["source"] = src - err = c.CopyObject(dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) - return - } - } - - // Get copied decrypted object and check if content is equal - reader, _, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err = ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test encrypted copy object -func testUnencryptedToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc encrypt.ServerSide - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc encrypt.ServerSide - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc, sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -func testDecryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" - if err = c.MakeBucket(bucketName, "us-east-1"); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket 
failed", err) - return - } - - encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) - _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ - ServerSideEncryption: encryption, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption)) - args["source"] = src - dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - args["destination"] = dst - - if err = c.CopyObject(dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testSSECMultipartEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 6MB of data - buf := bytes.Repeat([]byte("abcdef"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - // Upload a 6MB object using multipart mechanism - uploadID, err := c.NewMultipartUpload(bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - var completeParts []minio.CompletePart - - part, err := c.PutObjectPart(bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - part, err = c.PutObjectPart(bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(bucketName, objectName, uploadID, completeParts) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(bucketName, 
objectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: srcencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err = c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. - metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, 
function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (6*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 6*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 6*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) - } - - getOpts.SetRange(6*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 6*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:6*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) - } - if getBuf[6*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation -func testSSECEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := 
objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. - metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, 
minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy -func testSSECEncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - 
destBucketName := bucketName - destObjectName := objectName + "-dest" - var dstencryption encrypt.ServerSide - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. - metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, 
args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy -func testSSECEncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call 
failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part -func testUnencryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, 
"", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testUnencryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, 
getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testUnencryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 
5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part -func testSSES3EncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcEncryption := encrypt.NewSSE() - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, 
"", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testSSES3EncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - srcEncryption := encrypt.NewSSE() - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 5*1024*1024) - _, err = io.ReadFull(r, 
getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testSSES3EncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - srcEncryption := encrypt.NewSSE() - - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ - "Content-Type": "binary/octet-stream", - }, srcEncryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - } - - if objInfo.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf := make([]byte, 
5*1024*1024) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = io.ReadFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. -} -func testUserMetadataCopying() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testUserMetadataCopyingWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - // 1. create a client encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(http.Header) - metadata.Set("x-amz-meta-myheader", "myvalue") - m := make(map[string]string) - m["x-amz-meta-myheader"] = "myvalue" - _, err = c.PutObject(bucketName, "srcObject", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) - return - } - if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 2. create source - src := minio.NewSourceInfo(bucketName, "srcObject", nil) - // 2.1 create destination with metadata set - dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"}) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - // 3. Check that copying to an object with metadata set resets - // the headers on the copy. 
- args["source"] = src - args["destination"] = dst1 - err = c.CopyObject(dst1, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders := make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 4. create destination with no metadata set and same source - dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - src = minio.NewSourceInfo(bucketName, "srcObject", nil) - - // 5. Check that copying to an object with no metadata set, - // copies metadata. - args["source"] = src - args["destination"] = dst2 - err = c.CopyObject(dst2, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders = metadata - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 6. Compose a pair of sources. 
- srcs := []minio.SourceInfo{ - minio.NewSourceInfo(bucketName, "srcObject", nil), - minio.NewSourceInfo(bucketName, "srcObject", nil), - } - dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - function = "ComposeObject(destination, sources)" - args["source"] = srcs - args["destination"] = dst3 - err = c.ComposeObject(dst3, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 7. Compose a pair of sources with dest user metadata set. - srcs = []minio.SourceInfo{ - minio.NewSourceInfo(bucketName, "srcObject", nil), - minio.NewSourceInfo(bucketName, "srcObject", nil), - } - dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"}) - if err != nil { - logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err) - return - } - - function = "ComposeObject(destination, sources)" - args["source"] = srcs - args["destination"] = dst4 - err = c.ComposeObject(dst4, srcs) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - expectedHeaders = make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } 
- - successLogger(testName, function, args, startTime).Info() -} - -func testUserMetadataCopyingV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testStorageClassMetadataPutObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - metadata := make(http.Header) - metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - - emptyMetadata := make(http.Header) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - - _, err = c.PutObject(bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Get the returned metadata - returnedMeta := fetchMeta("srcObjectRRSClass") - - // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) - if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - metadata.Set("x-amz-storage-class", "STANDARD") - - _, err = c.PutObject(bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD 
storage class should not be a part of response metadata", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testStorageClassInvalidMetadataPutObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassInvalidMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - - _, err = c.PutObject(bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testStorageClassMetadataCopyObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassMetadataCopyObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) - args["bucket"] = bucketName - args["object"] = object - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - metadata := make(http.Header) - metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - - emptyMetadata := make(http.Header) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) - - // Put an object with RRS Storage class - _, err = c.PutObject(bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil) - dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil) - if err = c.CopyObject(dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) - } - - // Get the returned metadata - returnedMeta := fetchMeta("srcObjectRRSClassCopy") - - // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) - if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - 
metadata.Set("x-amz-storage-class", "STANDARD") - - // Put an object with Standard Storage class - _, err = c.PutObject(bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil) - dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil) - if err = c.CopyObject(dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) - } - // Fetch the meta data of copied object - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with size -1 byte object. -func testPutObjectNoLengthV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. 
- // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - objectName := bucketName + "unique" - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - args["size"] = bufSize - - // Upload an object. - n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) - return - } - if n != int64(bufSize) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put objects of unknown size. -func testPutObjectsUnknownV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Issues are revealed by trying to upload multiple files of unknown size - // sequentially (on 4GB machines) - for i := 1; i <= 4; i++ { - // Simulate that we could be receiving byte slices of data that we want - // to upload as a file - rpipe, wpipe := io.Pipe() - defer rpipe.Close() - go func() { - b := []byte("test") - wpipe.Write(b) - wpipe.Close() - }() - - // Upload the object. - objectName := fmt.Sprintf("%sunique%d", bucketName, i) - args["objectName"] = objectName - - n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - args["size"] = n - if n != int64(4) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err) - return - } - - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with 0 byte object. 
-func testPutObject0ByteV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": 0, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - objectName := bucketName + "unique" - args["objectName"] = objectName - args["opts"] = minio.PutObjectOptions{} - - // Upload an object. 
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) - return - } - if n != 0 { - logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCases() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -// Test concatenating 10K objects -func testCompose10KSources() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - testComposeMultipleSources(c) -} - -// Tests comprehensive list of all methods. 
-func testFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctionalV2()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - location := "us-east-1" - // Make a new bucket. - function = "MakeBucket(bucketName, location)" - functionAll = "MakeBucket(bucketName, location)" - args = map[string]interface{}{ - "bucketName": bucketName, - "location": location, - } - err = c.MakeBucket(bucketName, location) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate a random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - } - file.Close() - - // Verify if bucket exits and you have access. 
- var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err) - return - } - - // Make the bucket 'public read/write'. - function = "SetBucketPolicy(bucketName, bucketPolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - - // List all buckets. - function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets() - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. 
- if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) - return - } - - objectNameNoLength := objectName + "-nolength" - args["objectName"] = objectNameNoLength - n, err = c.PutObject(bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - if n != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) - return - } - - // Instantiate a done channel to close all listing. - doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. 
- function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) - return - } - - incompObjNotFound := true - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = 
c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FgetObject failed", err) - return - } - - // Generate presigned HEAD object url. - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - // Verify if presigned url works. - resp, err := http.Head(presignedHeadURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "Got empty ETag", err) - return - } - resp.Body.Close() - - // Generate presigned GET object url. - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - // Verify if presigned url works. 
- resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - // Generate presigned GET object url. - args["reqParams"] = reqParams - presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - // Verify if presigned url works. - resp, err = http.Get(presignedGetURL.String()) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - // Verify content disposition. 
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - // Generate data more than 32K - buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) - - req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. 
- Timeout: 30 * time.Second, - Transport: http.DefaultTransport, - } - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - } - newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err = ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - if err = os.Remove(fileName); err != nil { - logError(testName, function, args, startTime, "", "File remove failed", err) - return - } - if err = os.Remove(fileName + "-f"); err != nil { - logError(testName, function, args, startTime, "", "File removes failed", err) - return - } - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test get object with GetObjectWithContext -func testGetObjectWithContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObjectWithContext(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) - return - } - - if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) - return - } - r.Close() - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - - // Read the data back - r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != 
nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "object Stat call failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "object Close() call failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with FGetObjectWithContext -func testFGetObjectWithContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - fileName := "tempfile-context" - args["fileName"] = fileName - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err == nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err) - return - } - if err = os.Remove(fileName + "-fcontext"); err != nil { - logError(testName, function, args, startTime, "", "Remove file failed", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object ACLs with GetObjectACLWithContext -func testGetObjectACLWithContext() { - // initialize logging params - startTime := time.Now() - 
testName := getFuncName() - function := "GetObjectACLWithContext(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Instantiate new minio client object. - c, err := minio.NewV4( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Add meta data to add a canned acl - metaData := map[string]string{ - "X-Amz-Acl": "public-read-write", - } - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - args["ctx"] = ctx - defer cancel() - - // Read the data back - objectInfo, getObjectACLErr := c.GetObjectACLWithContext(ctx, bucketName, objectName) - if getObjectACLErr == nil { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail", getObjectACLErr) - return - } - - s, ok := objectInfo.Metadata["X-Amz-Acl"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail unable to find \"X-Amz-Acl\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "public-read-write" { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil) - return - } - - bufSize = dataFileMap["datafile-1-MB"] - var reader2 = getDataReader("datafile-1-MB") - defer reader2.Close() - // Save the data - objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Add 
meta data to add a canned acl - metaData = map[string]string{ - "X-Amz-Grant-Read": "id=fooread@minio.go", - "X-Amz-Grant-Write": "id=foowrite@minio.go", - } - - _, err = c.PutObject(bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) - args["ctx"] = ctx - defer cancel() - - // Read the data back - objectInfo, getObjectACLErr = c.GetObjectACLWithContext(ctx, bucketName, objectName) - if getObjectACLErr == nil { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail", getObjectACLErr) - return - } - - if len(objectInfo.Metadata) != 3 { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail unable to find \"X-Amz-Grant-Read\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "fooread@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail unable to find \"X-Amz-Grant-Write\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, 
len(s)), nil) - return - } - - if s[0] != "foowrite@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACLWithContext fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject with context to see if request cancellation is honored for V2. -func testPutObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer c.RemoveBucket(bucketName) - bufSize := dataFileMap["datatfile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - args["ctx"] = ctx - args["size"] = bufSize - defer cancel() - - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - - defer cancel() - reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with GetObjectWithContext -func testGetObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObjectWithContext(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed 
random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err) - return - } - if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err) - return - } - r.Close() - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - r, err = 
c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "object Stat call failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", " object Close() call failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with FGetObjectWithContext -func testFGetObjectWithContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.NewV2( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket call failed", err) - return - } - - bufSize := dataFileMap["datatfile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - fileName := "tempfile-context" - args["fileName"] = fileName - - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err == nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err) - return - } - - if err = os.Remove(fileName + "-fcontext"); err != nil { - logError(testName, function, args, startTime, "", "Remove file failed", err) - return - } - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, 
startTime).Info() - -} - -// Test list object v1 and V2 -func testListObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" - args := map[string]interface{}{ - "bucketName": "", - "objectPrefix": "", - "recursive": "true", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New( - os.Getenv(serverEndpoint), - os.Getenv(accessKey), - os.Getenv(secretKey), - mustParseBool(os.Getenv(enableHTTPS)), - ) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(bucketName, "us-east-1") - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - testObjects := []struct { - name string - storageClass string - }{ - // \x17 is a forbidden character in a xml document - {"foo\x17bar", "STANDARD"}, - // Special characters - {"foo bar", "STANDARD"}, - {"foo-%", "STANDARD"}, - {"random-object-1", "STANDARD"}, - {"random-object-2", "REDUCED_REDUNDANCY"}, - } - - for i, object := range testObjects { - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObject(bucketName, object.name, reader, int64(bufSize), - minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass}) - if err != nil { - logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err) - return - } - } - - testList := func(listFn func(string, string, bool, <-chan struct{}) <-chan minio.ObjectInfo, bucket string) { - // Create a done channel to control 'ListObjects' go routine. - doneCh := make(chan struct{}) - // Exit cleanly upon return. 
- defer close(doneCh) - - var objCursor int - - // check for object name and storage-class from listing object result - for objInfo := range listFn(bucket, "", true, doneCh) { - if objInfo.Err != nil { - logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) - return - } - if objInfo.Key != testObjects[objCursor].name { - logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err) - } - if objInfo.StorageClass != testObjects[objCursor].storageClass { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() - } - objCursor++ - } - - if objCursor != len(testObjects) { - logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("")) - } - } - - testList(c.ListObjects, bucketName) - testList(c.ListObjectsV2, bucketName) - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "Cleanup failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Convert string to bool and always return false if any error -func mustParseBool(str string) bool { - b, err := strconv.ParseBool(str) - if err != nil { - return false - } - return b -} - -func main() { - // Output to stdout instead of the default stderr - log.SetOutput(os.Stdout) - // create custom formatter - mintFormatter := mintJSONFormatter{} - // set custom formatter - log.SetFormatter(&mintFormatter) - // log Info or above -- success cases are Info level, failures are Fatal level - log.SetLevel(log.InfoLevel) - - tls := mustParseBool(os.Getenv(enableHTTPS)) - kmsEnabled := mustParseBool(os.Getenv(enableKMS)) - // execute tests - if isFullMode() { - testMakeBucketErrorV2() - testGetObjectClosedTwiceV2() - testFPutObjectV2() - testMakeBucketRegionsV2() 
- testGetObjectReadSeekFunctionalV2() - testGetObjectReadAtFunctionalV2() - testCopyObjectV2() - testFunctionalV2() - testComposeObjectErrorCasesV2() - testCompose10KSourcesV2() - testUserMetadataCopyingV2() - testPutObject0ByteV2() - testPutObjectNoLengthV2() - testPutObjectsUnknownV2() - testGetObjectWithContextV2() - testFPutObjectWithContextV2() - testFGetObjectWithContextV2() - testPutObjectWithContextV2() - testMakeBucketError() - testMakeBucketRegions() - testPutObjectWithMetadata() - testPutObjectReadAt() - testPutObjectStreaming() - testGetObjectSeekEnd() - testGetObjectClosedTwice() - testRemoveMultipleObjects() - testFPutObjectMultipart() - testFPutObject() - testGetObjectReadSeekFunctional() - testGetObjectReadAtFunctional() - testGetObjectReadAtWhenEOFWasReached() - testPresignedPostPolicy() - testCopyObject() - testComposeObjectErrorCases() - testCompose10KSources() - testUserMetadataCopying() - testBucketNotification() - testFunctional() - testGetObjectModified() - testPutObjectUploadSeekedObject() - testGetObjectWithContext() - testFPutObjectWithContext() - testFGetObjectWithContext() - - testGetObjectACLWithContext() - - testPutObjectWithContext() - testStorageClassMetadataPutObject() - testStorageClassInvalidMetadataPutObject() - testStorageClassMetadataCopyObject() - testPutObjectWithContentLanguage() - testListObjects() - - // SSE-C tests will only work over TLS connection. 
- if tls { - testSSECEncryptionPutGet() - testSSECEncryptionFPut() - testSSECEncryptedGetObjectReadAtFunctional() - testSSECEncryptedGetObjectReadSeekFunctional() - testEncryptedCopyObjectV2() - testEncryptedSSECToSSECCopyObject() - testEncryptedSSECToUnencryptedCopyObject() - testUnencryptedToSSECCopyObject() - testUnencryptedToUnencryptedCopyObject() - testEncryptedEmptyObject() - testDecryptedCopyObject() - testSSECEncryptedToSSECCopyObjectPart() - testSSECMultipartEncryptedToSSECCopyObjectPart() - testSSECEncryptedToUnencryptedCopyPart() - testUnencryptedToSSECCopyObjectPart() - testUnencryptedToUnencryptedCopyPart() - if kmsEnabled { - testSSES3EncryptionPutGet() - testSSES3EncryptionFPut() - testSSES3EncryptedGetObjectReadAtFunctional() - testSSES3EncryptedGetObjectReadSeekFunctional() - testEncryptedSSECToSSES3CopyObject() - testEncryptedSSES3ToSSECCopyObject() - testEncryptedSSES3ToSSES3CopyObject() - testEncryptedSSES3ToUnencryptedCopyObject() - testUnencryptedToSSES3CopyObject() - testSSECEncryptedToSSES3CopyObjectPart() - testUnencryptedToSSES3CopyObjectPart() - testSSES3EncryptedToSSECCopyObjectPart() - testSSES3EncryptedToUnencryptedCopyPart() - testSSES3EncryptedToSSES3CopyObjectPart() - } - } - } else { - testFunctional() - testFunctionalV2() - } -} diff --git a/vendor/github.com/prometheus/alertmanager/asset/asset_generate.go b/vendor/github.com/prometheus/alertmanager/asset/asset_generate.go deleted file mode 100644 index 2b8a4d3b05..0000000000 --- a/vendor/github.com/prometheus/alertmanager/asset/asset_generate.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -package main - -import ( - "log" - "time" - - "github.com/shurcooL/vfsgen" - - "github.com/prometheus/alertmanager/asset" - "github.com/prometheus/alertmanager/pkg/modtimevfs" -) - -func main() { - fs := modtimevfs.New(asset.Assets, time.Unix(1, 0)) - err := vfsgen.Generate(fs, vfsgen.Options{ - PackageName: "asset", - BuildTags: "!dev", - VariableName: "Assets", - }) - if err != nil { - log.Fatalln(err) - } -} diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go index 748fa8f2c1..14f0ecb710 100644 --- a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/weaveworks/common/server/server.go @@ -3,7 +3,6 @@ package server import ( "flag" "fmt" - "math" "net" "net/http" _ "net/http/pprof" // anonymous import to get the pprof handler registered @@ -18,7 +17,6 @@ import ( "golang.org/x/net/context" "golang.org/x/net/netutil" "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" "github.com/weaveworks/common/httpgrpc" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" @@ -51,14 +49,9 @@ type Config struct { GRPCStreamMiddleware []grpc.StreamServerInterceptor `yaml:"-"` HTTPMiddleware []middleware.Interface `yaml:"-"` - GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` - GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` - GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` - GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` - 
GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` - GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` - GRPCServerTime time.Duration `yaml:"grpc_server_keepalive_time"` - GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"` + GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` + GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` + GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` LogLevel logging.Level `yaml:"log_level"` Log logging.Interface `yaml:"-"` @@ -66,8 +59,6 @@ type Config struct { PathPrefix string `yaml:"http_path_prefix"` } -var infinty = time.Duration(math.MaxInt64) - // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.HTTPListenAddress, "server.http-listen-address", "", "HTTP server listen address.") @@ -84,11 +75,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.GPRCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") f.IntVar(&cfg.GRPCServerMaxSendMsgSize, "server.grpc-max-send-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can send (bytes).") f.UintVar(&cfg.GPRCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls (0 = unlimited)") - f.DurationVar(&cfg.GRPCServerMaxConnectionIdle, "server.grpc.keepalive.max-connection-idle", infinty, "The duration after which an idle connection should be closed. Default: infinity") - f.DurationVar(&cfg.GRPCServerMaxConnectionAge, "server.grpc.keepalive.max-connection-age", infinty, "The duration for the maximum amount of time a connection may exist before it will be closed. 
Default: infinity") - f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, "server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity") - f.DurationVar(&cfg.GRPCServerTime, "server.grpc.keepalive.time", time.Hour*2, "Duration after which a keepalive probe is sent in case of no activity over the connection., Default: 2h") - f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s") f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)") cfg.LogLevel.RegisterFlags(f) } @@ -165,14 +151,6 @@ func New(cfg Config) (*Server, error) { } grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) - grpcKeepAliveOptions := keepalive.ServerParameters{ - MaxConnectionIdle: cfg.GRPCServerMaxConnectionIdle, - MaxConnectionAge: cfg.GRPCServerMaxConnectionAge, - MaxConnectionAgeGrace: cfg.GRPCServerMaxConnectionAgeGrace, - Time: cfg.GRPCServerTime, - Timeout: cfg.GRPCServerTimeout, - } - grpcOptions := []grpc.ServerOption{ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( grpcMiddleware..., @@ -180,7 +158,6 @@ func New(cfg Config) (*Server, error) { grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( grpcStreamMiddleware..., )), - grpc.KeepaliveParams(grpcKeepAliveOptions), grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize), grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize), grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)), diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go deleted file mode 100644 index 2a7661c27b..0000000000 --- a/vendor/golang.org/x/net/internal/iana/gen.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates internet protocol constants and tables by -// reading IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "strconv" - "strings" -) - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", - parseDSCPRegistry, - }, - { - "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", - parseProtocolNumbers, - }, - { - "https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml", - parseAddrFamilyNumbers, - }, -} - -func main() { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") - fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") - fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) - os.Exit(1) - } - if err := r.parse(&bb, resp.Body); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile("const.go", b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func parseDSCPRegistry(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var dr dscpRegistry - if err := dec.Decode(&dr); 
err != nil { - return err - } - fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) - fmt.Fprintf(w, "const (\n") - for _, dr := range dr.escapeDSCP() { - fmt.Fprintf(w, "DiffServ%s = %#02x", dr.Name, dr.Value) - fmt.Fprintf(w, "// %s\n", dr.OrigName) - } - for _, er := range dr.escapeECN() { - fmt.Fprintf(w, "%s = %#02x", er.Descr, er.Value) - fmt.Fprintf(w, "// %s\n", er.OrigDescr) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type dscpRegistry struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Note string `xml:"note"` - Registries []struct { - Title string `xml:"title"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Name string `xml:"name"` - Space string `xml:"space"` - } `xml:"record"` - } `xml:"registry"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonDSCPRecord struct { - OrigName string - Name string - Value int -} - -func (drr *dscpRegistry) escapeDSCP() []canonDSCPRecord { - var drs []canonDSCPRecord - for _, preg := range drr.Registries { - if !strings.Contains(preg.Title, "Differentiated Services Field Codepoints") { - continue - } - for _, reg := range preg.Registries { - if !strings.Contains(reg.Title, "Pool 1 Codepoints") { - continue - } - drs = make([]canonDSCPRecord, len(reg.Records)) - sr := strings.NewReplacer( - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, dr := range reg.Records { - s := strings.TrimSpace(dr.Name) - drs[i].OrigName = s - drs[i].Name = sr.Replace(s) - n, err := strconv.ParseUint(dr.Space, 2, 8) - if err != nil { - continue - } - drs[i].Value = int(n) << 2 - } - } - } - return drs -} - -type canonECNRecord struct { - OrigDescr string - Descr string - Value int -} - -func (drr *dscpRegistry) escapeECN() []canonECNRecord { - var ers []canonECNRecord - for _, reg := range drr.Registries { - if !strings.Contains(reg.Title, 
"ECN Field") { - continue - } - ers = make([]canonECNRecord, len(reg.Records)) - sr := strings.NewReplacer( - "Capable", "", - "Not-ECT", "", - "ECT(1)", "", - "ECT(0)", "", - "CE", "", - "(", "", - ")", "", - "+", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, er := range reg.Records { - s := strings.TrimSpace(er.Descr) - ers[i].OrigDescr = s - ss := strings.Split(s, " ") - if len(ss) > 1 { - ers[i].Descr = strings.Join(ss[1:], " ") - } else { - ers[i].Descr = ss[0] - } - ers[i].Descr = sr.Replace(er.Descr) - n, err := strconv.ParseUint(er.Value, 2, 8) - if err != nil { - continue - } - ers[i].Value = int(n) - } - } - return ers -} - -func parseProtocolNumbers(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var pn protocolNumbers - if err := dec.Decode(&pn); err != nil { - return err - } - prs := pn.escape() - prs = append([]canonProtocolRecord{{ - Name: "IP", - Descr: "IPv4 encapsulation, pseudo protocol number", - Value: 0, - }}, prs...) - fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) - s := pr.Descr - if s == "" { - s = pr.OrigName - } - fmt.Fprintf(w, "// %s\n", s) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type protocolNumbers struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - RegTitle string `xml:"registry>title"` - Note string `xml:"registry>note"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - Descr string `xml:"description"` - } `xml:"registry>record"` -} - -type canonProtocolRecord struct { - OrigName string - Name string - Descr string - Value int -} - -func (pn *protocolNumbers) escape() []canonProtocolRecord { - prs := make([]canonProtocolRecord, len(pn.Records)) - sr := strings.NewReplacer( - "-in-", "in", - "-within-", "within", - "-over-", "over", - "+", "P", - "-", 
"", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range pn.Records { - if strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "deprecated") { - continue - } - prs[i].OrigName = pr.Name - s := strings.TrimSpace(pr.Name) - switch pr.Name { - case "ISIS over IPv4": - prs[i].Name = "ISIS" - case "manet": - prs[i].Name = "MANET" - default: - prs[i].Name = sr.Replace(s) - } - ss := strings.Split(pr.Descr, "\n") - for i := range ss { - ss[i] = strings.TrimSpace(ss[i]) - } - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} - -func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var afn addrFamilylNumbers - if err := dec.Decode(&afn); err != nil { - return err - } - afrs := afn.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated) - fmt.Fprintf(w, "const (\n") - for _, afr := range afrs { - if afr.Name == "" { - continue - } - fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value) - fmt.Fprintf(w, "// %s\n", afr.Descr) - } - fmt.Fprintf(w, ")\n") - return nil -} - -type addrFamilylNumbers struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - RegTitle string `xml:"registry>title"` - Note string `xml:"registry>note"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"registry>record"` -} - -type canonAddrFamilyRecord struct { - Name string - Descr string - Value int -} - -func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord { - afrs := make([]canonAddrFamilyRecord, len(afn.Records)) - sr := strings.NewReplacer( - "IP version 4", "IPv4", - "IP version 6", "IPv6", - "Identifier", "ID", - "-", "", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, afr := range afn.Records { - if strings.Contains(afr.Descr, "Unassigned") || - strings.Contains(afr.Descr, "Reserved") { - 
continue - } - afrs[i].Descr = afr.Descr - s := strings.TrimSpace(afr.Descr) - switch s { - case "IP (IP version 4)": - afrs[i].Name = "IPv4" - case "IP6 (IP version 6)": - afrs[i].Name = "IPv6" - case "AFI for L2VPN information": - afrs[i].Name = "L2VPN" - case "E.164 with NSAP format subaddress": - afrs[i].Name = "E164withSubaddress" - case "MT IP: Multi-Topology IP version 4": - afrs[i].Name = "MTIPv4" - case "MAC/24": - afrs[i].Name = "MACFinal24bits" - case "MAC/40": - afrs[i].Name = "MACFinal40bits" - case "IPv6/64": - afrs[i].Name = "IPv6Initial64bits" - default: - n := strings.Index(s, "(") - if n > 0 { - s = s[:n] - } - n = strings.Index(s, ":") - if n > 0 { - s = s[:n] - } - afrs[i].Name = sr.Replace(s) - } - afrs[i].Value, _ = strconv.Atoi(afr.Value) - } - return afrs -} diff --git a/vendor/golang.org/x/net/internal/socket/defs_aix.go b/vendor/golang.org/x/net/internal/socket/defs_aix.go deleted file mode 100644 index ae1b21c5e1..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_aix.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_darwin.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go deleted file mode 100644 index 85bb7450b0..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include -#include - -#define _GNU_SOURCE -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go deleted file mode 100644 index 5bfdd4676c..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type mmsghdr C.struct_mmsghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go deleted file mode 100644 index b780bc67ab..0000000000 --- a/vendor/golang.org/x/net/internal/socket/defs_solaris.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package socket - -/* -#include - -#include -*/ -import "C" - -type iovec C.struct_iovec - -type msghdr C.struct_msghdr - -type cmsghdr C.struct_cmsghdr - -type sockaddrInet C.struct_sockaddr_in - -type sockaddrInet6 C.struct_sockaddr_in6 - -const ( - sizeofIovec = C.sizeof_struct_iovec - sizeofMsghdr = C.sizeof_struct_msghdr - sizeofCmsghdr = C.sizeof_struct_cmsghdr - - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 -) diff --git a/vendor/golang.org/x/net/ipv4/defs_aix.go b/vendor/golang.org/x/net/ipv4/defs_aix.go deleted file mode 100644 index 0f37211c64..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_aix.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - // IP_RECVIF is defined on AIX but doesn't work. - // IP_RECVINTERFACE must be used instead. - sysIP_RECVIF = C.IP_RECVINTERFACE - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go deleted file mode 100644 index c8f2e05b81..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_darwin.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_STRIPHDR = C.IP_STRIPHDR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type ipMreq C.struct_ip_mreq - -type 
ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go deleted file mode 100644 index f30544ea24..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go deleted file mode 100644 index 4dd57d8653..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_freebsd.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_SENDSRCADDR = C.IP_SENDSRCADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_ONESBCAST = C.IP_ONESBCAST - sysIP_BINDANY = C.IP_BINDANY - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_MINTTL = C.IP_MINTTL - sysIP_DONTFRAG = C.IP_DONTFRAG - sysIP_RECVTOS = C.IP_RECVTOS - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type 
groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go deleted file mode 100644 index beb11071ad..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_linux.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_PKTOPTIONS = C.IP_PKTOPTIONS - sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER - sysIP_RECVERR = C.IP_RECVERR - sysIP_RECVTTL = C.IP_RECVTTL - sysIP_RECVTOS = C.IP_RECVTOS - sysIP_MTU = C.IP_MTU - sysIP_FREEBIND = C.IP_FREEBIND - sysIP_TRANSPARENT = C.IP_TRANSPARENT - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR - sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR - sysIP_MINTTL = C.IP_MINTTL - sysIP_NODEFRAG = C.IP_NODEFRAG - sysIP_UNICAST_IF = C.IP_UNICAST_IF - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_MSFILTER = C.IP_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = 
C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL - - //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT - //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT - //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO - //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE - //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE - //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT - - sysICMP_FILTER = C.ICMP_FILTER - - sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE - sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL - sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP - sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 - sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS - sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING - - sysSOL_SOCKET = C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqn = C.sizeof_struct_ip_mreqn - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPFilter = C.sizeof_struct_icmp_filter - - sizeofSockFprog = C.sizeof_struct_sock_fprog -) - -type kernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type sockExtendedErr C.struct_sock_extended_err - -type ipMreq C.struct_ip_mreq - -type ipMreqn C.struct_ip_mreqn - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpFilter C.struct_icmp_filter - -type sockFProg 
C.struct_sock_fprog - -type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go deleted file mode 100644 index 8f8af1b899..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_netbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go deleted file mode 100644 index 8f8af1b899..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_openbsd.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - - sizeofIPMreq = C.sizeof_struct_ip_mreq -) - -type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go deleted file mode 100644 index aeb33e9c8f..0000000000 --- a/vendor/golang.org/x/net/ipv4/defs_solaris.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in_addr [4]byte /* in_addr */ - -package ipv4 - -/* -#include - -#include -*/ -import "C" - -const ( - sysIP_OPTIONS = C.IP_OPTIONS - sysIP_HDRINCL = C.IP_HDRINCL - sysIP_TOS = C.IP_TOS - sysIP_TTL = C.IP_TTL - sysIP_RECVOPTS = C.IP_RECVOPTS - sysIP_RECVRETOPTS = C.IP_RECVRETOPTS - sysIP_RECVDSTADDR = C.IP_RECVDSTADDR - sysIP_RETOPTS = C.IP_RETOPTS - sysIP_RECVIF = C.IP_RECVIF - sysIP_RECVSLLA = C.IP_RECVSLLA - sysIP_RECVTTL = C.IP_RECVTTL - - sysIP_MULTICAST_IF = C.IP_MULTICAST_IF - sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL - sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP - sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP - sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP - sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE - sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE - sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP - sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP - sysIP_NEXTHOP = C.IP_NEXTHOP - - sysIP_PKTINFO = C.IP_PKTINFO - sysIP_RECVPKTINFO = C.IP_RECVPKTINFO - sysIP_DONTFRAG = C.IP_DONTFRAG - - sysIP_BOUND_IF = C.IP_BOUND_IF - sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC - sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL - sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF - - sysIP_REUSEADDR = C.IP_REUSEADDR - sysIP_DONTROUTE = C.IP_DONTROUTE - sysIP_BROADCAST = C.IP_BROADCAST - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet = C.sizeof_struct_sockaddr_in - sizeofInetPktinfo = C.sizeof_struct_in_pktinfo - - sizeofIPMreq = C.sizeof_struct_ip_mreq - sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req -) - -type 
sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet C.struct_sockaddr_in - -type inetPktinfo C.struct_in_pktinfo - -type ipMreq C.struct_ip_mreq - -type ipMreqSource C.struct_ip_mreq_source - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go deleted file mode 100644 index 1bb1737f67..0000000000 --- a/vendor/golang.org/x/net/ipv4/gen.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - 
"https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", - parseICMPv4Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") - fmt.Fprintf(&bb, "package ipv4\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv4Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var icp icmpv4Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigDescr) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Descr == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv4Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Descr string `xml:"description"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv4ParamRecord struct { - OrigDescr 
string - Descr string - Value int -} - -func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Descr, "Reserved") || - strings.Contains(pr.Descr, "Unassigned") || - strings.Contains(pr.Descr, "Deprecated") || - strings.Contains(pr.Descr, "Experiment") || - strings.Contains(pr.Descr, "experiment") { - continue - } - ss := strings.Split(pr.Descr, "\n") - if len(ss) > 1 { - prs[i].Descr = strings.Join(ss, " ") - } else { - prs[i].Descr = ss[0] - } - s := strings.TrimSpace(prs[i].Descr) - prs[i].OrigDescr = s - prs[i].Descr = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/ipv6/defs_aix.go b/vendor/golang.org/x/net/ipv6/defs_aix.go deleted file mode 100644 index ea396a3cb8..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_aix.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter - -type groupReq C.struct_group_req - -type groupSourceReq 
C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go deleted file mode 100644 index 55ddc116fc..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_darwin.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#define __APPLE_USE_RFC_3542 -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - 
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go deleted file mode 100644 index 27a1d1d6f7..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter 
C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go deleted file mode 100644 index 53e625389a..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_freebsd.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR - - sysIPV6_BINDANY = C.IPV6_BINDANY - - sysIPV6_MSFILTER = C.IPV6_MSFILTER - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - 
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go deleted file mode 100644 index 3308cb2c38..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_linux.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -const ( - sysIPV6_ADDRFORM = C.IPV6_ADDRFORM - sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO - sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS - sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS - sysIPV6_2292RTHDR = C.IPV6_2292RTHDR - sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_FLOWINFO = C.IPV6_FLOWINFO - - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP - sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_MSFILTER = C.MCAST_MSFILTER - sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT - sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER - sysIPV6_MTU = C.IPV6_MTU - sysIPV6_RECVERR = C.IPV6_RECVERR - sysIPV6_V6ONLY = C.IPV6_V6ONLY - sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST - sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST - - //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT - //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT - //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO - //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE - //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE - //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT - - sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR - sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY - - 
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_TCLASS = C.IPV6_TCLASS - - sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES - - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - - sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT - - sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR - sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR - sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT - sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF - - sysICMPV6_FILTER = C.ICMPV6_FILTER - - sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK - sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS - sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS - sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY - - sysSOL_SOCKET = C.SOL_SOCKET - sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER - - sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - 
sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter - - sizeofSockFprog = C.sizeof_struct_sock_fprog -) - -type kernelSockaddrStorage C.struct___kernel_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6FlowlabelReq C.struct_in6_flowlabel_req - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter - -type sockFProg C.struct_sock_fprog - -type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go deleted file mode 100644 index be9ceb9cc0..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_netbsd.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git 
a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go deleted file mode 100644 index 177ddf87d2..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_openbsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - sysIPV6_PORTRANGE = C.IPV6_PORTRANGE - sysICMP6_FILTER = C.ICMP6_FILTER - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - - sysIPV6_PATHMTU = C.IPV6_PATHMTU - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - sysIPV6_RTHDR = C.IPV6_RTHDR - - sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL - sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL - sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL - sysIPSEC6_OUTSA = C.IPSEC6_OUTSA - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - - sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL - sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL - - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_PIPEX = C.IPV6_PIPEX - 
- sysIPV6_RTABLE = C.IPV6_RTABLE - - sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT - sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH - sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW - - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go deleted file mode 100644 index 0f8ce2b46a..0000000000 --- a/vendor/golang.org/x/net/ipv6/defs_solaris.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package ipv6 - -/* -#include - -#include -#include -*/ -import "C" - -const ( - sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS - sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF - sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS - sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP - sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP - sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP - - sysIPV6_PKTINFO = C.IPV6_PKTINFO - - sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT - sysIPV6_NEXTHOP = C.IPV6_NEXTHOP - sysIPV6_HOPOPTS = C.IPV6_HOPOPTS - sysIPV6_DSTOPTS = C.IPV6_DSTOPTS - - sysIPV6_RTHDR = C.IPV6_RTHDR - sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS - - sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO - sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT - sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS - - sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR - - sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS - - sysIPV6_CHECKSUM = C.IPV6_CHECKSUM - sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS - sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU - sysIPV6_DONTFRAG = C.IPV6_DONTFRAG - sysIPV6_SEC_OPT = C.IPV6_SEC_OPT - sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES - sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU - sysIPV6_PATHMTU = C.IPV6_PATHMTU - sysIPV6_TCLASS = C.IPV6_TCLASS - sysIPV6_V6ONLY = C.IPV6_V6ONLY - - sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS - - sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP - sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP - sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE - sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE - sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP - sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP - - sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME - sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA - sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC - sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP - sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA - sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA - - sysIPV6_PREFER_SRC_MIPMASK 
= C.IPV6_PREFER_SRC_MIPMASK - sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT - sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK - sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT - sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK - sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT - - sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK - - sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT - - sysIPV6_BOUND_IF = C.IPV6_BOUND_IF - sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC - - sysICMP6_FILTER = C.ICMP6_FILTER - - sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage - sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo - - sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - sizeofGroupReq = C.sizeof_struct_group_req - sizeofGroupSourceReq = C.sizeof_struct_group_source_req - - sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -type sockaddrStorage C.struct_sockaddr_storage - -type sockaddrInet6 C.struct_sockaddr_in6 - -type inet6Pktinfo C.struct_in6_pktinfo - -type ipv6Mtuinfo C.struct_ip6_mtuinfo - -type ipv6Mreq C.struct_ipv6_mreq - -type groupReq C.struct_group_req - -type groupSourceReq C.struct_group_source_req - -type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go deleted file mode 100644 index 5885664fbc..0000000000 --- a/vendor/golang.org/x/net/ipv6/gen.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go - -// This program generates system adaptation constants and types, -// internet protocol constants and tables by reading template files -// and IANA protocol registries. 
-package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strconv" - "strings" -) - -func main() { - if err := genzsys(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := geniana(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func genzsys() error { - defs := "defs_" + runtime.GOOS + ".go" - f, err := os.Open(defs) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - f.Close() - cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) - b, err := cmd.Output() - if err != nil { - return err - } - b, err = format.Source(b) - if err != nil { - return err - } - zsys := "zsys_" + runtime.GOOS + ".go" - switch runtime.GOOS { - case "freebsd", "linux": - zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" - } - if err := ioutil.WriteFile(zsys, b, 0644); err != nil { - return err - } - return nil -} - -var registries = []struct { - url string - parse func(io.Writer, io.Reader) error -}{ - { - "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", - parseICMPv6Parameters, - }, -} - -func geniana() error { - var bb bytes.Buffer - fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") - fmt.Fprintf(&bb, "package ipv6\n\n") - for _, r := range registries { - resp, err := http.Get(r.url) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) - } - if err := r.parse(&bb, resp.Body); err != nil { - return err - } - fmt.Fprintf(&bb, "\n") - } - b, err := format.Source(bb.Bytes()) - if err != nil { - return err - } - if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { - return err - } - return nil -} - -func parseICMPv6Parameters(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - var 
icp icmpv6Parameters - if err := dec.Decode(&icp); err != nil { - return err - } - prs := icp.escape() - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "const (\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) - fmt.Fprintf(w, "// %s\n", pr.OrigName) - } - fmt.Fprintf(w, ")\n\n") - fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) - fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") - for _, pr := range prs { - if pr.Name == "" { - continue - } - fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) - } - fmt.Fprintf(w, "}\n") - return nil -} - -type icmpv6Parameters struct { - XMLName xml.Name `xml:"registry"` - Title string `xml:"title"` - Updated string `xml:"updated"` - Registries []struct { - Title string `xml:"title"` - Records []struct { - Value string `xml:"value"` - Name string `xml:"name"` - } `xml:"record"` - } `xml:"registry"` -} - -type canonICMPv6ParamRecord struct { - OrigName string - Name string - Value int -} - -func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { - id := -1 - for i, r := range icp.Registries { - if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { - id = i - break - } - } - if id < 0 { - return nil - } - prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) - sr := strings.NewReplacer( - "Messages", "", - "Message", "", - "ICMP", "", - "+", "P", - "-", "", - "/", "", - ".", "", - " ", "", - ) - for i, pr := range icp.Registries[id].Records { - if strings.Contains(pr.Name, "Reserved") || - strings.Contains(pr.Name, "Unassigned") || - strings.Contains(pr.Name, "Deprecated") || - strings.Contains(pr.Name, "Experiment") || - strings.Contains(pr.Name, "experiment") { - continue - } - ss := strings.Split(pr.Name, "\n") - if len(ss) > 1 { - prs[i].Name = strings.Join(ss, " ") - } else { - prs[i].Name = ss[0] - } - s := 
strings.TrimSpace(prs[i].Name) - prs[i].OrigName = s - prs[i].Name = sr.Replace(s) - prs[i].Value, _ = strconv.Atoi(pr.Value) - } - return prs -} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index 372ffbb24c..0000000000 --- a/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,717 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go based on the authoritative -// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat -// -// The version is derived from -// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat -// and a human-readable form is at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" -// and -version "an explicit version string". - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. - nodesBitsChildren = 10 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. 
- childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" - case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -const ( - defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" - gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" -) - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - numICANNRules = 0 - - // validSuffixRE is used to check that the entries in the public suffix - // list are in canonical form (after Punycode encoding). Specifically, - // capital letters are not allowed. - validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) - dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) - - comments = flag.Bool("comments", false, "generate table.go comments, for debugging") - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - if *url != defaultURL { - return fmt.Errorf("-version was not specified, and the -url is not the default one") - } - sha, date, err := gitCommit() - if err != nil { - return err - } - *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - if len(rules) != 0 { - return fmt.Errorf(`expected no rules before "BEGIN ICANN DOMAINS"`) - } - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann, numICANNRules = false, len(rules) - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffixRE.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s 
== "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): - case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" || strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". - default: - continue - } - } - - rules = append(rules, s) - - nt, wildcard := nodeTypeNormal, false - switch { - case strings.HasPrefix(s, "*."): - s, nt = s[2:], nodeTypeParentOnly - wildcard = true - case strings.HasPrefix(s, "!"): - s, nt = s[1:], nodeTypeException - } - labels := strings.Split(s, ".") - for n, i := &root, len(labels)-1; i >= 0; i-- { - label := labels[i] - n = n.child(label) - if i == 0 { - if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { - n.nodeType = nt - } - n.icann = n.icann && icann - n.wildcard = n.wildcard || wildcard - } - labelsMap[label] = true - } - } - labelsList = make([]string, 0, len(labelsMap)) - for label := range labelsMap { - labelsList = append(labelsList, label) - } - sort.Strings(labelsList) - - if err := generate(printReal, &root, "table.go"); err != nil { - return err - } - if err := generate(printTest, &root, "table_test.go"); err != nil { - return err - } - return nil -} - -func generate(p func(io.Writer, *node) error, root *node, filename string) error { - buf := new(bytes.Buffer) - if err := p(buf, root); err != nil { - return err - } - b, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - 
return ioutil.WriteFile(filename, b, 0644) -} - -func gitCommit() (sha, date string, retErr error) { - res, err := http.Get(gitCommitURL) - if err != nil { - return "", "", err - } - if res.StatusCode != http.StatusOK { - return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) - } - defer res.Body.Close() - b, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if m := shaRE.FindSubmatch(b); m != nil { - sha = string(m[1]) - } - if m := dateRE.FindSubmatch(b); m != nil { - date = string(m[1]) - } - if sha == "" || date == "" { - retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) - } - return sha, date, retErr -} - -func printTest(w io.Writer, n *node) error { - fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Fprintf(w, "package publicsuffix\n\nconst numICANNRules = %d\n\nvar rules = [...]string{\n", numICANNRules) - for _, rule := range rules { - fmt.Fprintf(w, "%q,\n", rule) - } - fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") - if err := n.walk(w, printNodeLabel); err != nil { - return err - } - fmt.Fprintf(w, "}\n") - return nil -} - -func printReal(w io.Writer, n *node) error { - const header = `// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = %q - -const ( - nodesBitsChildren = %d - nodesBitsICANN = %d - nodesBitsTextOffset = %d - nodesBitsTextLength = %d - - childrenBitsWildcard = %d - childrenBitsNodeType = %d - childrenBitsHi = %d - childrenBitsLo = %d -) - -const ( - nodeTypeNormal = %d - nodeTypeException = %d - nodeTypeParentOnly = %d -) - -// numTLD is the number of top level domains. 
-const numTLD = %d - -` - fmt.Fprintf(w, header, *version, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, - nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - - text := combineText(labelsList) - if text == "" { - return fmt.Errorf("internal error: makeText returned no text") - } - for _, label := range labelsList { - offset, length := strings.Index(text, label), len(label) - if offset < 0 { - return fmt.Errorf("internal error: could not find %q in text %q", label, text) - } - maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1< 64 { - n, plus = 64, " +" - } - fmt.Fprintf(w, "%q%s\n", text[:n], plus) - text = text[n:] - } - - if err := n.walk(w, assignIndexes); err != nil { - return err - } - - fmt.Fprintf(w, ` - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. -// -// If the table was generated with the -comments flag, there is a //-comment -// after each node's data. In it is the nodes-array indexes of the children, -// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. 
-// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] children index -// [%2d bits] ICANN bit -// [%2d bits] text index -// [%2d bits] text length -var nodes = [...]uint32{ -`, - 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) - if err := n.walk(w, printNode); err != nil { - return err - } - fmt.Fprintf(w, `} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] wildcard bit -// [%2d bits] node type -// [%2d bits] high nodes index (exclusive) of children -// [%2d bits] low nodes index (inclusive) of children -var children=[...]uint32{ -`, - 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) - for i, c := range childrenEncoding { - s := "---------------" - lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - if *comments { - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) - } else { - fmt.Fprintf(w, "0x%x,\n", c) - } - } - fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { - ss = ss[1:] - } - return ss -} - -// crush combines a list of strings, taking advantage of overlaps. It returns a -// single string that contains each input string as a substring. 
-func crush(ss []string) string { - maxLabelLen := 0 - for _, s := range ss { - if maxLabelLen < len(s) { - maxLabelLen = len(s) - } - } - - for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { - prefixes := makePrefixMap(ss, prefixLen) - for i, s := range ss { - if len(s) <= prefixLen { - continue - } - mergeLabel(ss, i, prefixLen, prefixes) - } - } - - return strings.Join(ss, "") -} - -// mergeLabel merges the label at ss[i] with the first available matching label -// in prefixMap, where the last "prefixLen" characters in ss[i] match the first -// "prefixLen" characters in the matching label. -// It will merge ss[i] repeatedly until no more matches are available. -// All matching labels merged into ss[i] are replaced by "". -func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { - s := ss[i] - suffix := s[len(s)-prefixLen:] - for _, j := range prefixes[suffix] { - // Empty strings mean "already used." Also avoid merging with self. - if ss[j] == "" || i == j { - continue - } - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", - prefixLen, i, j, ss[i], ss[j], suffix) - } - ss[i] += ss[j][prefixLen:] - ss[j] = "" - // ss[i] has a new suffix, so merge again if possible. - // Note: we only have to merge again at the same prefix length. Shorter - // prefix lengths will be handled in the next iteration of crush's for loop. - // Can there be matches for longer prefix lengths, introduced by the merge? - // I believe that any such matches would by necessity have been eliminated - // during substring removal or merged at a higher prefix length. For - // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" - // would yield "abcde", which could be merged with "bcdef." However, in - // practice "cde" would already have been elimintated by removeSubstrings. - mergeLabel(ss, i, prefixLen, prefixes) - return - } -} - -// prefixMap maps from a prefix to a list of strings containing that prefix. 
The -// list of strings is represented as indexes into a slice of strings stored -// elsewhere. -type prefixMap map[string][]int - -// makePrefixMap constructs a prefixMap from a slice of strings. -func makePrefixMap(ss []string, prefixLen int) prefixMap { - prefixes := make(prefixMap) - for i, s := range ss { - // We use < rather than <= because if a label matches on a prefix equal to - // its full length, that's actually a substring match handled by - // removeSubstrings. - if prefixLen < len(s) { - prefix := s[:prefixLen] - prefixes[prefix] = append(prefixes[prefix], i) - } - } - - return prefixes -} diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 6f7bb6edfb..0000000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. 
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func writeASMFile(in string, fileName string, buildTags string) { - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build %s\n", buildTags) - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err := ioutil.WriteFile(fileName, out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write %s: %s", fileName, err) - } -} - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12") - - in1, err = ioutil.ReadFile("syscall_darwin.1_13.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.1_13.go: %s", err) - } - in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err) - } - - in = string(in1) + string(in2) - - writeASMFile(in, 
fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13") -} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index eb4332059a..0000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. 
- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) - b = sttimespec.ReplaceAll(b, []byte("Timespec")) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. - ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. 
in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // Rename Stat_t time fields - if goos == "freebsd" && goarch == "386" { - // Hide Stat_t.[AMCB]tim_ext fields - renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) - b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) - } - renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) - b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index 9e540cc892..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. 
-The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return 
regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out 
params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. 
- var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. 
- asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. 
- if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc3b..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := 
os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. 
- - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. - var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, 
fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c960099517..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) 
- // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - 
fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. - // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. 
- // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if 
regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. 
- var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. 
- // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == 
"int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - 
} - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. 
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b6..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { 
- fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. 
- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. 
- if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b409909c..0000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. 
-// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. 
-func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." - } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - 
`netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + 
header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. 
- -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index baa6ecd850..0000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). -package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func 
format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], 
t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { - name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beede5..0000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type 
_Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go 
b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692b..0000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type 
Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - 
SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d0..0000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq 
C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - 
SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index a121dc3368..0000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo 
C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_ATTACH = C.PT_ATTACH - PTRACE_CONT = C.PT_CONTINUE - PTRACE_DETACH = C.PT_DETACH - PTRACE_GETFPREGS = C.PT_GETFPREGS - PTRACE_GETFSBASE = C.PT_GETFSBASE - PTRACE_GETLWPLIST = C.PT_GETLWPLIST - PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS - PTRACE_GETREGS = C.PT_GETREGS - PTRACE_GETXSTATE = C.PT_GETXSTATE - PTRACE_IO = C.PT_IO - PTRACE_KILL = C.PT_KILL - PTRACE_LWPEVENTS = C.PT_LWP_EVENTS - PTRACE_LWPINFO = C.PT_LWPINFO - PTRACE_SETFPREGS = C.PT_SETFPREGS - PTRACE_SETREGS = C.PT_SETREGS - PTRACE_SINGLESTEP = C.PT_STEP - PTRACE_TRACEME = C.PT_TRACE_ME -) - -const ( - PIOD_READ_D = C.PIOD_READ_D - PIOD_WRITE_D = C.PIOD_WRITE_D - PIOD_READ_I = C.PIOD_READ_I - PIOD_WRITE_I = C.PIOD_WRITE_I -) - -const ( - PL_FLAG_BORN = C.PL_FLAG_BORN - PL_FLAG_EXITED = C.PL_FLAG_EXITED - PL_FLAG_SI = C.PL_FLAG_SI -) - -const ( - TRAP_BRKPT = C.TRAP_BRKPT - TRAP_TRACE = C.TRAP_TRACE -) - -type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo - -type __Siginfo C.struct___siginfo - -type Sigset_t C.sigset_t - -type Reg C.struct_reg - -type FpReg C.struct_fpreg - -type PtraceIoDesc C.struct_ptrace_io_desc - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and 
interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data - -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. 
- -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 4a96d72c37..0000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type 
RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = 
C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 775cb57dc8..0000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq 
C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat 
C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f9348..0000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec 
C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat 
C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169cc..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - 
"PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. - - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. 
-var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d6..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. 
- 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb9942894..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. 
-type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa9334..0000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. 
- -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). -type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. 
- ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. 
- decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. 
-func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... -// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. -const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. 
-func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. 
- for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, 
s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. - for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. 
- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. 
-func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. - // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... 
-// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900d..0000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 
The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. - -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls 
:= len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go deleted file mode 100644 index 092277e1f6..0000000000 --- a/vendor/golang.org/x/text/width/gen.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// This program generates the trie for width operations. The generated table -// includes width category information as well as the normalization mappings. -package main - -import ( - "bytes" - "fmt" - "io" - "log" - "math" - "unicode/utf8" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" -) - -// See gen_common.go for flags. - -func main() { - gen.Init() - genTables() - genTests() - gen.Repackage("gen_trieval.go", "trieval.go", "width") - gen.Repackage("gen_common.go", "common_test.go", "width") -} - -func genTables() { - t := triegen.NewTrie("width") - // fold and inverse mappings. See mapComment for a description of the format - // of each entry. Add dummy value to make an index of 0 mean no mapping. 
- inverse := [][4]byte{{}} - mapping := map[[4]byte]int{[4]byte{}: 0} - - getWidthData(func(r rune, tag elem, alt rune) { - idx := 0 - if alt != 0 { - var buf [4]byte - buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) - s := string(r) - buf[buf[0]] ^= s[len(s)-1] - var ok bool - if idx, ok = mapping[buf]; !ok { - idx = len(mapping) - if idx > math.MaxUint8 { - log.Fatalf("Index %d does not fit in a byte.", idx) - } - mapping[buf] = idx - inverse = append(inverse, buf) - } - } - t.Insert(r, uint64(tag|elem(idx))) - }) - - w := &bytes.Buffer{} - gen.WriteUnicodeVersion(w) - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - - sz += writeMappings(w, inverse) - - fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) - - gen.WriteVersionedGoFile(*outputFile, "width", w.Bytes()) -} - -const inverseDataComment = ` -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. 
-// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8.` - -func writeMappings(w io.Writer, data [][4]byte) int { - fmt.Fprintln(w, inverseDataComment) - fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) - for _, x := range data { - fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) - } - fmt.Fprintln(w, "}") - return len(data) * 4 -} - -func genTests() { - w := &bytes.Buffer{} - fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") - getWidthData(func(r rune, tag elem, alt rune) { - if alt != 0 { - fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) - } - }) - fmt.Fprintln(w, "}") - gen.WriteGoFile("runes_test.go", "width", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go deleted file mode 100644 index 601e752684..0000000000 --- a/vendor/golang.org/x/text/width/gen_common.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This code is shared between the main code generator and the test code. - -import ( - "flag" - "log" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" -) - -var ( - outputFile = flag.String("out", "tables.go", "output file") -) - -var typeMap = map[string]elem{ - "A": tagAmbiguous, - "N": tagNeutral, - "Na": tagNarrow, - "W": tagWide, - "F": tagFullwidth, - "H": tagHalfwidth, -} - -// getWidthData calls f for every entry for which it is defined. -// -// f may be called multiple times for the same rune. The last call to f is the -// correct value. f is not called for all runes. The default tag type is -// Neutral. -func getWidthData(f func(r rune, tag elem, alt rune)) { - // Set the default values for Unified Ideographs. 
In line with Annex 11, - // we encode full ranges instead of the defined runes in Unified_Ideograph. - for _, b := range []struct{ lo, hi rune }{ - {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, - {0x3400, 0x4DBF}, // the CJK Unified Ideographs Externsion A block, - {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, - {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, - {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, - } { - for r := b.lo; r <= b.hi; r++ { - f(r, tagWide, 0) - } - } - - inverse := map[rune]rune{} - maps := map[string]bool{ - "": true, - "": true, - } - - // We cannot reuse package norm's decomposition, as we need an unexpanded - // decomposition. We make use of the opportunity to verify that the - // decomposition type is as expected. - ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) - if !maps[s[0]] { - return - } - x, err := strconv.ParseUint(s[1], 16, 32) - if err != nil { - log.Fatalf("Error parsing rune %q", s[1]) - } - if inverse[r] != 0 || inverse[rune(x)] != 0 { - log.Fatalf("Circular dependency in mapping between %U and %U", r, x) - } - inverse[r] = rune(x) - inverse[rune(x)] = r - }) - - // ; - ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { - tag, ok := typeMap[p.String(1)] - if !ok { - log.Fatalf("Unknown width type %q", p.String(1)) - } - r := p.Rune(0) - alt, ok := inverse[r] - if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { - tag |= tagNeedsFold - if !ok { - log.Fatalf("Narrow or wide rune %U has no decomposition", r) - } - } - f(r, tag, alt) - }) -} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go deleted file mode 100644 index c17334aa61..0000000000 --- a/vendor/golang.org/x/text/width/gen_trieval.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go deleted file mode 100644 index 2713dce64a..0000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/main.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// The gcexportdata command is a diagnostic tool that displays the -// contents of gc export data files. 
-package main - -import ( - "flag" - "fmt" - "go/token" - "go/types" - "log" - "os" - - "golang.org/x/tools/go/gcexportdata" - "golang.org/x/tools/go/types/typeutil" -) - -var packageFlag = flag.String("package", "", "alternative package to print") - -func main() { - log.SetPrefix("gcexportdata: ") - log.SetFlags(0) - flag.Usage = func() { - fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") - } - flag.Parse() - if flag.NArg() != 1 { - flag.Usage() - os.Exit(2) - } - filename := flag.Args()[0] - - f, err := os.Open(filename) - if err != nil { - log.Fatal(err) - } - - r, err := gcexportdata.NewReader(f) - if err != nil { - log.Fatalf("%s: %s", filename, err) - } - - // Decode the package. - const primary = "" - imports := make(map[string]*types.Package) - fset := token.NewFileSet() - pkg, err := gcexportdata.Read(r, fset, imports, primary) - if err != nil { - log.Fatalf("%s: %s", filename, err) - } - - // Optionally select an indirectly mentioned package. - if *packageFlag != "" { - pkg = imports[*packageFlag] - if pkg == nil { - fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", - filename, *packageFlag) - for p := range imports { - if p != primary { - fmt.Fprintf(os.Stderr, "\t%s\n", p) - } - } - os.Exit(1) - } - } - - // Print all package-level declarations, including non-exported ones. - fmt.Printf("package %s\n", pkg.Name()) - for _, imp := range pkg.Imports() { - fmt.Printf("import %q\n", imp.Path()) - } - qual := func(p *types.Package) string { - if pkg == p { - return "" - } - return p.Name() - } - scope := pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - fmt.Printf("%s: %s\n", - fset.Position(obj.Pos()), - types.ObjectString(obj, qual)) - - // For types, print each method. 
- if _, ok := obj.(*types.TypeName); ok { - for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { - fmt.Printf("%s: %s\n", - fset.Position(method.Obj().Pos()), - types.SelectionString(method, qual)) - } - } - } -} diff --git a/vendor/golang.org/x/tools/internal/imports/mkindex.go b/vendor/golang.org/x/tools/internal/imports/mkindex.go deleted file mode 100644 index ef8c0d2871..0000000000 --- a/vendor/golang.org/x/tools/internal/imports/mkindex.go +++ /dev/null @@ -1,173 +0,0 @@ -// +build ignore - -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command mkindex creates the file "pkgindex.go" containing an index of the Go -// standard library. The file is intended to be built as part of the imports -// package, so that the package may be used in environments where a GOROOT is -// not available (such as App Engine). -package imports - -import ( - "bytes" - "fmt" - "go/ast" - "go/build" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "strings" -) - -var ( - pkgIndex = make(map[string][]pkg) - exports = make(map[string]map[string]bool) -) - -func main() { - // Don't use GOPATH. - ctx := build.Default - ctx.GOPATH = "" - - // Populate pkgIndex global from GOROOT. - for _, path := range ctx.SrcDirs() { - f, err := os.Open(path) - if err != nil { - log.Print(err) - continue - } - children, err := f.Readdir(-1) - f.Close() - if err != nil { - log.Print(err) - continue - } - for _, child := range children { - if child.IsDir() { - loadPkg(path, child.Name()) - } - } - } - // Populate exports global. - for _, ps := range pkgIndex { - for _, p := range ps { - e := loadExports(p.dir) - if e != nil { - exports[p.dir] = e - } - } - } - - // Construct source file. 
- var buf bytes.Buffer - fmt.Fprint(&buf, pkgIndexHead) - fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex) - fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports) - src := buf.Bytes() - - // Replace main.pkg type name with pkg. - src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1) - // Replace actual GOROOT with "/go". - src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1) - // Add some line wrapping. - src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1) - src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1) - - var err error - src, err = format.Source(src) - if err != nil { - log.Fatal(err) - } - - // Write out source file. - err = ioutil.WriteFile("pkgindex.go", src, 0644) - if err != nil { - log.Fatal(err) - } -} - -const pkgIndexHead = `package imports - -func init() { - pkgIndexOnce.Do(func() { - pkgIndex.m = pkgIndexMaster - }) - loadExports = func(dir string) map[string]bool { - return exportsMaster[dir] - } -} -` - -type pkg struct { - importpath string // full pkg import path, e.g. "net/http" - dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt" -} - -var fset = token.NewFileSet() - -func loadPkg(root, importpath string) { - shortName := path.Base(importpath) - if shortName == "testdata" { - return - } - - dir := filepath.Join(root, importpath) - pkgIndex[shortName] = append(pkgIndex[shortName], pkg{ - importpath: importpath, - dir: dir, - }) - - pkgDir, err := os.Open(dir) - if err != nil { - return - } - children, err := pkgDir.Readdir(-1) - pkgDir.Close() - if err != nil { - return - } - for _, child := range children { - name := child.Name() - if name == "" { - continue - } - if c := name[0]; c == '.' 
|| ('0' <= c && c <= '9') { - continue - } - if child.IsDir() { - loadPkg(root, filepath.Join(importpath, name)) - } - } -} - -func loadExports(dir string) map[string]bool { - exports := make(map[string]bool) - buildPkg, err := build.ImportDir(dir, 0) - if err != nil { - if strings.Contains(err.Error(), "no buildable Go source files in") { - return nil - } - log.Printf("could not import %q: %v", dir, err) - return nil - } - for _, file := range buildPkg.GoFiles { - f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0) - if err != nil { - log.Printf("could not parse %q: %v", file, err) - continue - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports[name] = true - } - } - } - return exports -} diff --git a/vendor/golang.org/x/tools/internal/imports/mkstdlib.go b/vendor/golang.org/x/tools/internal/imports/mkstdlib.go deleted file mode 100644 index 39b86ccd90..0000000000 --- a/vendor/golang.org/x/tools/internal/imports/mkstdlib.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build ignore - -// mkstdlib generates the zstdlib.go file, containing the Go standard -// library API symbols. It's baked into the binary to avoid scanning -// GOPATH in the common case. -package main - -import ( - "bufio" - "bytes" - "fmt" - "go/format" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "sort" -) - -func mustOpen(name string) io.Reader { - f, err := os.Open(name) - if err != nil { - log.Fatal(err) - } - return f -} - -func api(base string) string { - return filepath.Join(runtime.GOROOT(), "api", base) -} - -var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) - -var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true} - -func main() { - var buf bytes.Buffer - outf := func(format string, args ...interface{}) { - fmt.Fprintf(&buf, format, args...) - } - outf("// Code generated by mkstdlib.go. 
DO NOT EDIT.\n\n") - outf("package imports\n") - outf("var stdlib = map[string][]string{\n") - f := io.MultiReader( - mustOpen(api("go1.txt")), - mustOpen(api("go1.1.txt")), - mustOpen(api("go1.2.txt")), - mustOpen(api("go1.3.txt")), - mustOpen(api("go1.4.txt")), - mustOpen(api("go1.5.txt")), - mustOpen(api("go1.6.txt")), - mustOpen(api("go1.7.txt")), - mustOpen(api("go1.8.txt")), - mustOpen(api("go1.9.txt")), - mustOpen(api("go1.10.txt")), - mustOpen(api("go1.11.txt")), - mustOpen(api("go1.12.txt")), - mustOpen(api("go1.13.txt")), - - // The API of the syscall/js package needs to be computed explicitly, - // because it's not included in the GOROOT/api/go1.*.txt files at this time. - syscallJSAPI(), - ) - sc := bufio.NewScanner(f) - - pkgs := map[string]map[string]bool{ - "unsafe": unsafeSyms, - } - paths := []string{"unsafe"} - - for sc.Scan() { - l := sc.Text() - if m := sym.FindStringSubmatch(l); m != nil { - path, sym := m[1], m[2] - - if _, ok := pkgs[path]; !ok { - pkgs[path] = map[string]bool{} - paths = append(paths, path) - } - pkgs[path][sym] = true - } - } - if err := sc.Err(); err != nil { - log.Fatal(err) - } - sort.Strings(paths) - for _, path := range paths { - outf("\t%q: []string{\n", path) - pkg := pkgs[path] - var syms []string - for sym := range pkg { - syms = append(syms, sym) - } - sort.Strings(syms) - for _, sym := range syms { - outf("\t\t%q,\n", sym) - } - outf("},\n") - } - outf("}\n") - fmtbuf, err := format.Source(buf.Bytes()) - if err != nil { - log.Fatal(err) - } - err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666) - if err != nil { - log.Fatal(err) - } -} - -// syscallJSAPI returns the API of the syscall/js package. -// It's computed from the contents of $(go env GOROOT)/src/syscall/js. 
-func syscallJSAPI() io.Reader { - var exeSuffix string - if runtime.GOOS == "windows" { - exeSuffix = ".exe" - } - cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js") - out, err := cmd.Output() - if err != nil { - log.Fatalln(err) - } - return bytes.NewReader(out) -} diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go deleted file mode 100644 index 168cdb8578..0000000000 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ /dev/null @@ -1,308 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bufconn provides a net.Conn implemented by a buffer and related -// dialing and listening functionality. -package bufconn - -import ( - "fmt" - "io" - "net" - "sync" - "time" -) - -// Listener implements a net.Listener that creates local, buffered net.Conns -// via its Accept and Dial method. 
-type Listener struct { - mu sync.Mutex - sz int - ch chan net.Conn - done chan struct{} -} - -// Implementation of net.Error providing timeout -type netErrorTimeout struct { - error -} - -func (e netErrorTimeout) Timeout() bool { return true } -func (e netErrorTimeout) Temporary() bool { return false } - -var errClosed = fmt.Errorf("closed") -var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} - -// Listen returns a Listener that can only be contacted by its own Dialers and -// creates buffered connections between the two. -func Listen(sz int) *Listener { - return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} -} - -// Accept blocks until Dial is called, then returns a net.Conn for the server -// half of the connection. -func (l *Listener) Accept() (net.Conn, error) { - select { - case <-l.done: - return nil, errClosed - case c := <-l.ch: - return c, nil - } -} - -// Close stops the listener. -func (l *Listener) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.done: - // Already closed. - break - default: - close(l.done) - } - return nil -} - -// Addr reports the address of the listener. -func (l *Listener) Addr() net.Addr { return addr{} } - -// Dial creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. -func (l *Listener) Dial() (net.Conn, error) { - p1, p2 := newPipe(l.sz), newPipe(l.sz) - select { - case <-l.done: - return nil, errClosed - case l.ch <- &conn{p1, p2}: - return &conn{p2, p1}, nil - } -} - -type pipe struct { - mu sync.Mutex - - // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. - // - // Data is read between [r, w) and written to [w, r), wrapping around the end - // of the slice if necessary. 
- // - // The buffer is empty if r == len(buf), otherwise if r == w, it is full. - // - // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. - buf []byte - w, r int - - wwait sync.Cond - rwait sync.Cond - - // Indicate that a write/read timeout has occurred - wtimedout bool - rtimedout bool - - wtimer *time.Timer - rtimer *time.Timer - - closed bool - writeClosed bool -} - -func newPipe(sz int) *pipe { - p := &pipe{buf: make([]byte, 0, sz)} - p.wwait.L = &p.mu - p.rwait.L = &p.mu - - p.wtimer = time.AfterFunc(0, func() {}) - p.rtimer = time.AfterFunc(0, func() {}) - return p -} - -func (p *pipe) empty() bool { - return p.r == len(p.buf) -} - -func (p *pipe) full() bool { - return p.r < len(p.buf) && p.r == p.w -} - -func (p *pipe) Read(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - // Block until p has data. - for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.empty() { - break - } - if p.writeClosed { - return 0, io.EOF - } - if p.rtimedout { - return 0, errTimeout - } - - p.rwait.Wait() - } - wasFull := p.full() - - n = copy(b, p.buf[p.r:len(p.buf)]) - p.r += n - if p.r == cap(p.buf) { - p.r = 0 - p.buf = p.buf[:p.w] - } - - // Signal a blocked writer, if any - if wasFull { - p.wwait.Signal() - } - - return n, nil -} - -func (p *pipe) Write(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.closed { - return 0, io.ErrClosedPipe - } - for len(b) > 0 { - // Block until p is not full. - for { - if p.closed || p.writeClosed { - return 0, io.ErrClosedPipe - } - if !p.full() { - break - } - if p.wtimedout { - return 0, errTimeout - } - - p.wwait.Wait() - } - wasEmpty := p.empty() - - end := cap(p.buf) - if p.w < p.r { - end = p.r - } - x := copy(p.buf[p.w:end], b) - b = b[x:] - n += x - p.w += x - if p.w > len(p.buf) { - p.buf = p.buf[:p.w] - } - if p.w == cap(p.buf) { - p.w = 0 - } - - // Signal a blocked reader, if any. 
- if wasEmpty { - p.rwait.Signal() - } - } - return n, nil -} - -func (p *pipe) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - p.closed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -func (p *pipe) closeWrite() error { - p.mu.Lock() - defer p.mu.Unlock() - p.writeClosed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -type conn struct { - io.Reader - io.Writer -} - -func (c *conn) Close() error { - err1 := c.Reader.(*pipe).Close() - err2 := c.Writer.(*pipe).closeWrite() - if err1 != nil { - return err1 - } - return err2 -} - -func (c *conn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -func (c *conn) SetReadDeadline(t time.Time) error { - p := c.Reader.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.rtimer.Stop() - p.rtimedout = false - if !t.IsZero() { - p.rtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.rtimedout = true - p.rwait.Broadcast() - }) - } - return nil -} - -func (c *conn) SetWriteDeadline(t time.Time) error { - p := c.Writer.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.wtimer.Stop() - p.wtimedout = false - if !t.IsZero() { - p.wtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.wtimedout = true - p.wwait.Broadcast() - }) - } - return nil -} - -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } - -type addr struct{} - -func (addr) Network() string { return "bufconn" } -func (addr) String() string { return "bufconn" } diff --git a/vendor/modules.txt b/vendor/modules.txt index db71563d30..e2c866ed60 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,13 +1,13 @@ # cloud.google.com/go v0.49.0 cloud.google.com/go +cloud.google.com/go/compute/metadata cloud.google.com/go/iam 
+cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen -cloud.google.com/go/internal -cloud.google.com/go/compute/metadata # cloud.google.com/go/bigtable v1.1.0 cloud.google.com/go/bigtable cloud.google.com/go/bigtable/bttest @@ -61,35 +61,38 @@ github.com/asaskevich/govalidator github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr +github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/service/applicationautoscaling -github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface -github.com/aws/aws-sdk-go/service/dynamodb -github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface -github.com/aws/aws-sdk-go/service/s3 -github.com/aws/aws-sdk-go/service/s3/s3iface -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/internal/sdkio github.com/aws/aws-sdk-go/aws/client/metadata -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/crr github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/s3err 
+github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults -github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/private/protocol -github.com/aws/aws-sdk-go/private/protocol/jsonrpc -github.com/aws/aws-sdk-go/aws/crr -github.com/aws/aws-sdk-go/internal/s3err +github.com/aws/aws-sdk-go/private/protocol/ec2query github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil @@ -103,16 +106,6 @@ github.com/aws/aws-sdk-go/service/s3/internal/arn github.com/aws/aws-sdk-go/service/s3/s3iface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/internal/sdkmath -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil -github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/service/ec2 -github.com/aws/aws-sdk-go/internal/sdkuri -github.com/aws/aws-sdk-go/private/protocol/query/queryutil -github.com/aws/aws-sdk-go/private/protocol/ec2query # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.0+incompatible @@ -149,8 +142,8 @@ github.com/fluent/fluent-logger-golang/fluent github.com/fsouza/fake-gcs-server/fakestorage github.com/fsouza/fake-gcs-server/internal/backend # github.com/go-kit/kit v0.9.0 
-github.com/go-kit/kit/log/level github.com/go-kit/kit/log +github.com/go-kit/kit/log/level # github.com/go-logfmt/logfmt v0.4.0 github.com/go-logfmt/logfmt # github.com/go-openapi/analysis v0.19.4 @@ -165,14 +158,14 @@ github.com/go-openapi/jsonreference # github.com/go-openapi/loads v0.19.2 github.com/go-openapi/loads # github.com/go-openapi/runtime v0.19.3 -github.com/go-openapi/runtime/middleware github.com/go-openapi/runtime +github.com/go-openapi/runtime/flagext github.com/go-openapi/runtime/logger +github.com/go-openapi/runtime/middleware github.com/go-openapi/runtime/middleware/denco github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security -github.com/go-openapi/runtime/flagext # github.com/go-openapi/spec v0.19.2 github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.19.2 @@ -193,42 +186,42 @@ github.com/gogo/googleapis/google/rpc # github.com/gogo/protobuf v1.3.1 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto -github.com/gogo/protobuf/types -github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/protoc-gen-gogo/descriptor +github.com/gogo/protobuf/sortkeys +github.com/gogo/protobuf/types # github.com/gogo/status v1.0.3 github.com/gogo/status # github.com/golang-migrate/migrate/v4 v4.7.0 github.com/golang-migrate/migrate/v4 -github.com/golang-migrate/migrate/v4/database/postgres -github.com/golang-migrate/migrate/v4/source/file github.com/golang-migrate/migrate/v4/database +github.com/golang-migrate/migrate/v4/database/postgres github.com/golang-migrate/migrate/v4/internal/url github.com/golang-migrate/migrate/v4/source +github.com/golang-migrate/migrate/v4/source/file # github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 github.com/golang/groupcache/lru # github.com/golang/protobuf v1.3.2 +github.com/golang/protobuf/descriptor +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes/duration 
-github.com/golang/protobuf/ptypes/empty -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/wrappers -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/protoc-gen-go github.com/golang/protobuf/protoc-gen-go/descriptor -github.com/golang/protobuf/descriptor -github.com/golang/protobuf/jsonpb github.com/golang/protobuf/protoc-gen-go/generator -github.com/golang/protobuf/protoc-gen-go/grpc -github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/protoc-gen-go/generator/internal/remap +github.com/golang/protobuf/protoc-gen-go/grpc github.com/golang/protobuf/protoc-gen-go/plugin +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty +github.com/golang/protobuf/ptypes/struct +github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.1 github.com/golang/snappy # github.com/gomodule/redigo v2.0.0+incompatible -github.com/gomodule/redigo/redis github.com/gomodule/redigo/internal +github.com/gomodule/redigo/redis # github.com/google/btree v1.0.0 github.com/google/btree # github.com/google/go-cmp v0.3.1 @@ -252,29 +245,29 @@ github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors +github.com/gophercloud/gophercloud/openstack/compute/v2/flavors +github.com/gophercloud/gophercloud/openstack/compute/v2/images github.com/gophercloud/gophercloud/openstack/compute/v2/servers -github.com/gophercloud/gophercloud/pagination +github.com/gophercloud/gophercloud/openstack/identity/v2/tenants github.com/gophercloud/gophercloud/openstack/identity/v2/tokens github.com/gophercloud/gophercloud/openstack/identity/v3/tokens 
github.com/gophercloud/gophercloud/openstack/utils -github.com/gophercloud/gophercloud/openstack/compute/v2/flavors -github.com/gophercloud/gophercloud/openstack/compute/v2/images -github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/pagination # github.com/gorilla/mux v1.7.1 github.com/gorilla/mux # github.com/gorilla/websocket v1.4.0 github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 github.com/grpc-ecosystem/go-grpc-middleware -github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing github.com/grpc-ecosystem/go-grpc-middleware/tags +github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing github.com/grpc-ecosystem/go-grpc-middleware/util/metautils # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus # github.com/grpc-ecosystem/grpc-gateway v1.12.1 +github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities -github.com/grpc-ecosystem/grpc-gateway/internal # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool # github.com/hashicorp/consul/api v1.3.0 @@ -294,8 +287,8 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-sockaddr # github.com/hashicorp/golang-lru v0.5.3 -github.com/hashicorp/golang-lru/simplelru github.com/hashicorp/golang-lru +github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/memberlist v0.1.5 github.com/hashicorp/memberlist # github.com/hashicorp/serf v0.8.5 @@ -329,9 +322,9 @@ github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram # github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e +github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -github.com/mailru/easyjson/buffer # github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe 
github.com/mattn/go-ieproxy # github.com/matttproud/golang_protobuf_extensions v1.0.1 @@ -369,8 +362,8 @@ github.com/opentracing-contrib/go-grpc github.com/opentracing-contrib/go-stdlib/nethttp # github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 github.com/opentracing/opentracing-go -github.com/opentracing/opentracing-go/log github.com/opentracing/opentracing-go/ext +github.com/opentracing/opentracing-go/log # github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8 github.com/philhofer/fwd # github.com/pkg/errors v0.8.1 @@ -379,11 +372,25 @@ github.com/pkg/errors github.com/pmezard/go-difflib/difflib # github.com/prometheus/alertmanager v0.19.0 github.com/prometheus/alertmanager/api +github.com/prometheus/alertmanager/api/metrics +github.com/prometheus/alertmanager/api/v1 +github.com/prometheus/alertmanager/api/v2 +github.com/prometheus/alertmanager/api/v2/models +github.com/prometheus/alertmanager/api/v2/restapi +github.com/prometheus/alertmanager/api/v2/restapi/operations +github.com/prometheus/alertmanager/api/v2/restapi/operations/alert +github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup +github.com/prometheus/alertmanager/api/v2/restapi/operations/general +github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver +github.com/prometheus/alertmanager/api/v2/restapi/operations/silence +github.com/prometheus/alertmanager/asset github.com/prometheus/alertmanager/cluster +github.com/prometheus/alertmanager/cluster/clusterpb github.com/prometheus/alertmanager/config github.com/prometheus/alertmanager/dispatch github.com/prometheus/alertmanager/inhibit github.com/prometheus/alertmanager/nflog +github.com/prometheus/alertmanager/nflog/nflogpb github.com/prometheus/alertmanager/notify github.com/prometheus/alertmanager/notify/email github.com/prometheus/alertmanager/notify/hipchat @@ -394,47 +401,33 @@ github.com/prometheus/alertmanager/notify/slack github.com/prometheus/alertmanager/notify/victorops 
github.com/prometheus/alertmanager/notify/webhook github.com/prometheus/alertmanager/notify/wechat +github.com/prometheus/alertmanager/pkg/parse +github.com/prometheus/alertmanager/provider github.com/prometheus/alertmanager/provider/mem github.com/prometheus/alertmanager/silence +github.com/prometheus/alertmanager/silence/silencepb +github.com/prometheus/alertmanager/store github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -github.com/prometheus/alertmanager/api/v1 -github.com/prometheus/alertmanager/api/v2 -github.com/prometheus/alertmanager/provider -github.com/prometheus/alertmanager/cluster/clusterpb -github.com/prometheus/alertmanager/store -github.com/prometheus/alertmanager/nflog/nflogpb -github.com/prometheus/alertmanager/silence/silencepb -github.com/prometheus/alertmanager/asset -github.com/prometheus/alertmanager/api/v2/models -github.com/prometheus/alertmanager/api/metrics -github.com/prometheus/alertmanager/pkg/parse -github.com/prometheus/alertmanager/api/v2/restapi -github.com/prometheus/alertmanager/api/v2/restapi/operations -github.com/prometheus/alertmanager/api/v2/restapi/operations/alert -github.com/prometheus/alertmanager/api/v2/restapi/operations/alertgroup -github.com/prometheus/alertmanager/api/v2/restapi/operations/general -github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver -github.com/prometheus/alertmanager/api/v2/restapi/operations/silence # github.com/prometheus/client_golang v1.2.1 -github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 -github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal -github.com/prometheus/client_golang/prometheus/testutil 
+github.com/prometheus/client_golang/prometheus/promauto +github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/push +github.com/prometheus/client_golang/prometheus/testutil # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go # github.com/prometheus/common v0.7.0 -github.com/prometheus/common/version -github.com/prometheus/common/model -github.com/prometheus/common/route github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model +github.com/prometheus/common/route +github.com/prometheus/common/version # github.com/prometheus/procfs v0.0.6 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -460,59 +453,59 @@ github.com/prometheus/prometheus/notifier github.com/prometheus/prometheus/pkg/exemplar github.com/prometheus/prometheus/pkg/gate # github.com/prometheus/prometheus v1.8.2-0.20191126064551-80ba03c67da1 +github.com/prometheus/prometheus/config +github.com/prometheus/prometheus/discovery +github.com/prometheus/prometheus/discovery/azure +github.com/prometheus/prometheus/discovery/config +github.com/prometheus/prometheus/discovery/consul +github.com/prometheus/prometheus/discovery/dns +github.com/prometheus/prometheus/discovery/ec2 +github.com/prometheus/prometheus/discovery/file +github.com/prometheus/prometheus/discovery/gce +github.com/prometheus/prometheus/discovery/kubernetes +github.com/prometheus/prometheus/discovery/marathon +github.com/prometheus/prometheus/discovery/openstack +github.com/prometheus/prometheus/discovery/refresh +github.com/prometheus/prometheus/discovery/targetgroup +github.com/prometheus/prometheus/discovery/triton +github.com/prometheus/prometheus/discovery/zookeeper +github.com/prometheus/prometheus/notifier +github.com/prometheus/prometheus/pkg/exemplar +github.com/prometheus/prometheus/pkg/gate 
github.com/prometheus/prometheus/pkg/labels -github.com/prometheus/prometheus/promql -github.com/prometheus/prometheus/tsdb/chunkenc +github.com/prometheus/prometheus/pkg/logging +github.com/prometheus/prometheus/pkg/pool github.com/prometheus/prometheus/pkg/relabel -github.com/prometheus/prometheus/tsdb github.com/prometheus/prometheus/pkg/rulefmt -github.com/prometheus/prometheus/rules github.com/prometheus/prometheus/pkg/textparse github.com/prometheus/prometheus/pkg/timestamp github.com/prometheus/prometheus/pkg/value +github.com/prometheus/prometheus/prompb +github.com/prometheus/prometheus/promql +github.com/prometheus/prometheus/rules +github.com/prometheus/prometheus/scrape github.com/prometheus/prometheus/storage +github.com/prometheus/prometheus/storage/remote github.com/prometheus/prometheus/storage/tsdb -github.com/prometheus/prometheus/util/stats -github.com/prometheus/prometheus/util/strutil -github.com/prometheus/prometheus/util/testutil -github.com/prometheus/prometheus/config -github.com/prometheus/prometheus/web/api/v1 -github.com/prometheus/prometheus/pkg/gate -github.com/prometheus/prometheus/tsdb/errors -github.com/prometheus/prometheus/tsdb/fileutil -github.com/prometheus/prometheus/tsdb/wal -github.com/prometheus/prometheus/scrape -github.com/prometheus/prometheus/discovery -github.com/prometheus/prometheus/discovery/config -github.com/prometheus/prometheus/discovery/dns -github.com/prometheus/prometheus/discovery/targetgroup -github.com/prometheus/prometheus/notifier -github.com/prometheus/prometheus/util/teststorage +github.com/prometheus/prometheus/template +github.com/prometheus/prometheus/tsdb +github.com/prometheus/prometheus/tsdb/chunkenc github.com/prometheus/prometheus/tsdb/chunks github.com/prometheus/prometheus/tsdb/encoding +github.com/prometheus/prometheus/tsdb/errors +github.com/prometheus/prometheus/tsdb/fileutil github.com/prometheus/prometheus/tsdb/goversion github.com/prometheus/prometheus/tsdb/index 
github.com/prometheus/prometheus/tsdb/record github.com/prometheus/prometheus/tsdb/tombstones -github.com/prometheus/prometheus/template -github.com/prometheus/prometheus/pkg/exemplar -github.com/prometheus/prometheus/prompb -github.com/prometheus/prometheus/storage/remote +github.com/prometheus/prometheus/tsdb/wal github.com/prometheus/prometheus/util/httputil -github.com/prometheus/prometheus/pkg/pool -github.com/prometheus/prometheus/discovery/azure -github.com/prometheus/prometheus/discovery/consul -github.com/prometheus/prometheus/discovery/ec2 -github.com/prometheus/prometheus/discovery/file -github.com/prometheus/prometheus/discovery/gce -github.com/prometheus/prometheus/discovery/kubernetes -github.com/prometheus/prometheus/discovery/marathon -github.com/prometheus/prometheus/discovery/openstack -github.com/prometheus/prometheus/discovery/triton -github.com/prometheus/prometheus/discovery/zookeeper -github.com/prometheus/prometheus/discovery/refresh -github.com/prometheus/prometheus/pkg/logging +github.com/prometheus/prometheus/util/stats +github.com/prometheus/prometheus/util/strutil +github.com/prometheus/prometheus/util/teststorage +github.com/prometheus/prometheus/util/testutil github.com/prometheus/prometheus/util/treecache +github.com/prometheus/prometheus/web/api/v1 # github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 github.com/rafaeljusto/redigomock # github.com/rs/cors v1.6.0 @@ -568,23 +561,25 @@ github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/runutil github.com/stretchr/testify/mock github.com/stretchr/testify/assert +github.com/stretchr/testify/mock +github.com/stretchr/testify/require # github.com/thanos-io/thanos v0.8.1-0.20200102143048-a37ac093a67a +github.com/thanos-io/thanos/pkg/block +github.com/thanos-io/thanos/pkg/block/metadata github.com/thanos-io/thanos/pkg/compact github.com/thanos-io/thanos/pkg/compact/downsample -github.com/thanos-io/thanos/pkg/objstore 
-github.com/thanos-io/thanos/pkg/block/metadata -github.com/thanos-io/thanos/pkg/shipper +github.com/thanos-io/thanos/pkg/component +github.com/thanos-io/thanos/pkg/extprom github.com/thanos-io/thanos/pkg/model +github.com/thanos-io/thanos/pkg/objstore +github.com/thanos-io/thanos/pkg/objstore/gcs +github.com/thanos-io/thanos/pkg/objstore/s3 +github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/runutil +github.com/thanos-io/thanos/pkg/shipper github.com/thanos-io/thanos/pkg/store github.com/thanos-io/thanos/pkg/store/cache github.com/thanos-io/thanos/pkg/store/storepb -github.com/thanos-io/thanos/pkg/objstore/gcs -github.com/thanos-io/thanos/pkg/objstore/s3 -github.com/thanos-io/thanos/pkg/block -github.com/thanos-io/thanos/pkg/component -github.com/thanos-io/thanos/pkg/extprom -github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/strutil github.com/thanos-io/thanos/pkg/tracing # github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d @@ -595,23 +590,23 @@ github.com/tmc/grpc-websocket-proxy/wsproxy github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config github.com/uber/jaeger-client-go/internal/baggage +github.com/uber/jaeger-client-go/internal/baggage/remote github.com/uber/jaeger-client-go/internal/spanlog github.com/uber/jaeger-client-go/internal/throttler +github.com/uber/jaeger-client-go/internal/throttler/remote github.com/uber/jaeger-client-go/log +github.com/uber/jaeger-client-go/rpcmetrics github.com/uber/jaeger-client-go/thrift +github.com/uber/jaeger-client-go/thrift-gen/agent +github.com/uber/jaeger-client-go/thrift-gen/baggage github.com/uber/jaeger-client-go/thrift-gen/jaeger github.com/uber/jaeger-client-go/thrift-gen/sampling github.com/uber/jaeger-client-go/thrift-gen/zipkincore +github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils -github.com/uber/jaeger-client-go/internal/baggage/remote -github.com/uber/jaeger-client-go/internal/throttler/remote 
-github.com/uber/jaeger-client-go/rpcmetrics -github.com/uber/jaeger-client-go/transport -github.com/uber/jaeger-client-go/thrift-gen/agent -github.com/uber/jaeger-client-go/thrift-gen/baggage # github.com/uber/jaeger-lib v2.2.0+incompatible -github.com/uber/jaeger-lib/metrics/prometheus github.com/uber/jaeger-lib/metrics +github.com/uber/jaeger-lib/metrics/prometheus # github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 github.com/weaveworks/billing-client # github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 @@ -620,16 +615,20 @@ github.com/weaveworks/common/aws github.com/weaveworks/common/tracing github.com/weaveworks/common/server github.com/weaveworks/common/user +# github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 => github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 +github.com/weaveworks/common/aws github.com/weaveworks/common/errors github.com/weaveworks/common/httpgrpc +github.com/weaveworks/common/httpgrpc/server github.com/weaveworks/common/instrument -github.com/weaveworks/common/mtime -github.com/weaveworks/common/aws github.com/weaveworks/common/logging -github.com/weaveworks/common/httpgrpc/server github.com/weaveworks/common/middleware +github.com/weaveworks/common/mtime +github.com/weaveworks/common/server github.com/weaveworks/common/signals github.com/weaveworks/common/test +github.com/weaveworks/common/tracing +github.com/weaveworks/common/user # github.com/weaveworks/promrus v1.2.0 github.com/weaveworks/promrus # github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 @@ -637,23 +636,32 @@ github.com/xiang90/probing # go.etcd.io/bbolt v1.3.3 go.etcd.io/bbolt # go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5 -go.etcd.io/etcd/clientv3 -go.etcd.io/etcd/embed -go.etcd.io/etcd/etcdserver/api/v3client +go.etcd.io/etcd/auth go.etcd.io/etcd/auth/authpb +go.etcd.io/etcd/client +go.etcd.io/etcd/clientv3 go.etcd.io/etcd/clientv3/balancer go.etcd.io/etcd/clientv3/balancer/picker 
go.etcd.io/etcd/clientv3/balancer/resolver/endpoint -go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes -go.etcd.io/etcd/etcdserver/etcdserverpb -go.etcd.io/etcd/mvcc/mvccpb -go.etcd.io/etcd/pkg/logutil -go.etcd.io/etcd/pkg/types +go.etcd.io/etcd/clientv3/concurrency +go.etcd.io/etcd/embed go.etcd.io/etcd/etcdserver +go.etcd.io/etcd/etcdserver/api go.etcd.io/etcd/etcdserver/api/etcdhttp +go.etcd.io/etcd/etcdserver/api/membership go.etcd.io/etcd/etcdserver/api/rafthttp +go.etcd.io/etcd/etcdserver/api/snap +go.etcd.io/etcd/etcdserver/api/snap/snappb +go.etcd.io/etcd/etcdserver/api/v2auth +go.etcd.io/etcd/etcdserver/api/v2discovery +go.etcd.io/etcd/etcdserver/api/v2error go.etcd.io/etcd/etcdserver/api/v2http +go.etcd.io/etcd/etcdserver/api/v2http/httptypes +go.etcd.io/etcd/etcdserver/api/v2stats +go.etcd.io/etcd/etcdserver/api/v2store go.etcd.io/etcd/etcdserver/api/v2v3 +go.etcd.io/etcd/etcdserver/api/v3alarm +go.etcd.io/etcd/etcdserver/api/v3client go.etcd.io/etcd/etcdserver/api/v3compactor go.etcd.io/etcd/etcdserver/api/v3election go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb @@ -662,121 +670,112 @@ go.etcd.io/etcd/etcdserver/api/v3lock go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw go.etcd.io/etcd/etcdserver/api/v3rpc +go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes +go.etcd.io/etcd/etcdserver/etcdserverpb go.etcd.io/etcd/etcdserver/etcdserverpb/gw -go.etcd.io/etcd/pkg/debugutil -go.etcd.io/etcd/pkg/flags -go.etcd.io/etcd/pkg/httputil -go.etcd.io/etcd/pkg/netutil -go.etcd.io/etcd/pkg/runtime -go.etcd.io/etcd/pkg/srv -go.etcd.io/etcd/pkg/tlsutil -go.etcd.io/etcd/pkg/transport -go.etcd.io/etcd/version -go.etcd.io/etcd/wal -go.etcd.io/etcd/proxy/grpcproxy/adapter -go.etcd.io/etcd/pkg/systemd -go.etcd.io/etcd/raft -go.etcd.io/etcd/auth -go.etcd.io/etcd/etcdserver/api -go.etcd.io/etcd/etcdserver/api/membership -go.etcd.io/etcd/etcdserver/api/snap -go.etcd.io/etcd/etcdserver/api/v2discovery 
-go.etcd.io/etcd/etcdserver/api/v2http/httptypes -go.etcd.io/etcd/etcdserver/api/v2stats -go.etcd.io/etcd/etcdserver/api/v2store -go.etcd.io/etcd/etcdserver/api/v3alarm go.etcd.io/etcd/lease go.etcd.io/etcd/lease/leasehttp +go.etcd.io/etcd/lease/leasepb go.etcd.io/etcd/mvcc go.etcd.io/etcd/mvcc/backend +go.etcd.io/etcd/mvcc/mvccpb +go.etcd.io/etcd/pkg/adt go.etcd.io/etcd/pkg/contention +go.etcd.io/etcd/pkg/cpuutil +go.etcd.io/etcd/pkg/crc +go.etcd.io/etcd/pkg/debugutil go.etcd.io/etcd/pkg/fileutil +go.etcd.io/etcd/pkg/flags +go.etcd.io/etcd/pkg/httputil go.etcd.io/etcd/pkg/idutil +go.etcd.io/etcd/pkg/ioutil +go.etcd.io/etcd/pkg/logutil +go.etcd.io/etcd/pkg/netutil +go.etcd.io/etcd/pkg/pathutil go.etcd.io/etcd/pkg/pbutil +go.etcd.io/etcd/pkg/runtime go.etcd.io/etcd/pkg/schedule +go.etcd.io/etcd/pkg/srv +go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types go.etcd.io/etcd/pkg/wait -go.etcd.io/etcd/raft/raftpb -go.etcd.io/etcd/wal/walpb -go.etcd.io/etcd/etcdserver/api/v2error -go.etcd.io/etcd/pkg/ioutil -go.etcd.io/etcd/etcdserver/api/v2auth -go.etcd.io/etcd/clientv3/concurrency -go.etcd.io/etcd/pkg/adt -go.etcd.io/etcd/pkg/cpuutil -go.etcd.io/etcd/pkg/crc +go.etcd.io/etcd/proxy/grpcproxy/adapter +go.etcd.io/etcd/raft go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/raft/raftpb go.etcd.io/etcd/raft/tracker -go.etcd.io/etcd/etcdserver/api/snap/snappb -go.etcd.io/etcd/client -go.etcd.io/etcd/lease/leasepb -go.etcd.io/etcd/pkg/pathutil +go.etcd.io/etcd/version +go.etcd.io/etcd/wal +go.etcd.io/etcd/wal/walpb # go.mongodb.org/mongo-driver v1.1.0 go.mongodb.org/mongo-driver/bson -go.mongodb.org/mongo-driver/bson/bsontype -go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonrw +go.mongodb.org/mongo-driver/bson/bsontype +go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore # go.opencensus.io v0.22.2 
-go.opencensus.io/plugin/ochttp -go.opencensus.io/trace +go.opencensus.io +go.opencensus.io/internal +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata +go.opencensus.io/metric/metricproducer go.opencensus.io/plugin/ocgrpc +go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/resource go.opencensus.io/stats +go.opencensus.io/stats/internal go.opencensus.io/stats/view go.opencensus.io/tag -go.opencensus.io/trace/propagation -go.opencensus.io/internal +go.opencensus.io/trace go.opencensus.io/trace/internal +go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -go.opencensus.io/metric/metricdata -go.opencensus.io/stats/internal -go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricproducer -go.opencensus.io -go.opencensus.io/resource # go.uber.org/atomic v1.5.0 go.uber.org/atomic # go.uber.org/multierr v1.1.0 go.uber.org/multierr # go.uber.org/zap v1.10.0 go.uber.org/zap -go.uber.org/zap/zapcore -go.uber.org/zap/internal/bufferpool go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 +golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt +golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/ed25519 -golang.org/x/crypto/argon2 golang.org/x/crypto/ed25519/internal/edwards25519 -golang.org/x/crypto/blake2b golang.org/x/crypto/ssh/terminal # golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 -golang.org/x/exp/cmd/apidiff golang.org/x/exp/apidiff +golang.org/x/exp/cmd/apidiff # golang.org/x/lint v0.0.0-20190930215403-16217165b5de -golang.org/x/lint/golint golang.org/x/lint +golang.org/x/lint/golint # golang.org/x/net v0.0.0-20191112182307-2180aed22343 +golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp -golang.org/x/net/netutil -golang.org/x/net/trace 
-golang.org/x/net/internal/timeseries +golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack -golang.org/x/net/ipv4 -golang.org/x/net/ipv6 -golang.org/x/net/http/httpguts -golang.org/x/net/publicsuffix golang.org/x/net/idna -golang.org/x/net/http/httpproxy -golang.org/x/net/bpf golang.org/x/net/internal/iana golang.org/x/net/internal/socket +golang.org/x/net/internal/timeseries +golang.org/x/net/ipv4 +golang.org/x/net/ipv6 +golang.org/x/net/netutil +golang.org/x/net/publicsuffix +golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/google @@ -786,94 +785,104 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/errgroup # golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 -golang.org/x/sys/windows +golang.org/x/sys/cpu golang.org/x/sys/unix +golang.org/x/sys/windows golang.org/x/sys/windows/registry -golang.org/x/sys/cpu # golang.org/x/text v0.3.2 -golang.org/x/text/transform -golang.org/x/text/unicode/norm golang.org/x/text/secure/bidirule +golang.org/x/text/transform golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm golang.org/x/text/width # golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time/rate # golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 golang.org/x/tools/cmd/goimports -golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/packages -golang.org/x/tools/internal/imports golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/go/buildutil +golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/internal/gopathwalk -golang.org/x/tools/internal/semver -golang.org/x/tools/internal/span -golang.org/x/tools/go/ast/astutil 
-golang.org/x/tools/internal/module +golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath -golang.org/x/tools/go/buildutil -golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/imports +golang.org/x/tools/internal/module +golang.org/x/tools/internal/semver +golang.org/x/tools/internal/span # google.golang.org/api v0.14.0 -google.golang.org/api/option -google.golang.org/api/transport/http google.golang.org/api/cloudresourcemanager/v1 -google.golang.org/api/iterator -google.golang.org/api/transport/grpc +google.golang.org/api/compute/v1 google.golang.org/api/googleapi -google.golang.org/api/storage/v1 -google.golang.org/api/internal google.golang.org/api/googleapi/transport -google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/transport +google.golang.org/api/internal google.golang.org/api/internal/gensupport google.golang.org/api/internal/third_party/uritemplates -google.golang.org/api/compute/v1 +google.golang.org/api/iterator +google.golang.org/api/option +google.golang.org/api/storage/v1 +google.golang.org/api/transport +google.golang.org/api/transport/grpc +google.golang.org/api/transport/http +google.golang.org/api/transport/http/internal/propagation # google.golang.org/appengine v1.6.5 -google.golang.org/appengine/urlfetch google.golang.org/appengine -google.golang.org/appengine/socket google.golang.org/appengine/internal -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api 
+google.golang.org/appengine/internal/socket +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket +google.golang.org/appengine/urlfetch # google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 -google.golang.org/genproto/googleapis/rpc/status +google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 -google.golang.org/genproto/protobuf/field_mask google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/longrunning google.golang.org/genproto/googleapis/rpc/code -google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.25.1 google.golang.org/grpc -google.golang.org/grpc/codes -google.golang.org/grpc/status -google.golang.org/grpc/health/grpc_health_v1 -google.golang.org/grpc/encoding/gzip -google.golang.org/grpc/metadata -google.golang.org/grpc/naming -google.golang.org/grpc/test/bufconn google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb +google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/alts +google.golang.org/grpc/credentials/alts/internal +google.golang.org/grpc/credentials/alts/internal/authinfo +google.golang.org/grpc/credentials/alts/internal/conn +google.golang.org/grpc/credentials/alts/internal/handshaker +google.golang.org/grpc/credentials/alts/internal/handshaker/service 
+google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp +google.golang.org/grpc/credentials/google +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog +google.golang.org/grpc/health +google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancerload @@ -885,30 +894,19 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats +google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/internal/syscall -google.golang.org/grpc/balancer/grpclb -google.golang.org/grpc/credentials/google -google.golang.org/grpc/credentials/oauth -google.golang.org/grpc/resolver/dns -google.golang.org/grpc/resolver/passthrough -google.golang.org/grpc/health -google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 -google.golang.org/grpc/credentials/alts -google.golang.org/grpc/credentials/alts/internal -google.golang.org/grpc/credentials/alts/internal/handshaker -google.golang.org/grpc/credentials/alts/internal/handshaker/service -google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp -google.golang.org/grpc/credentials/alts/internal/authinfo 
-google.golang.org/grpc/credentials/alts/internal/conn # gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 # gopkg.in/fsnotify/fsnotify.v1 v1.4.7 @@ -920,44 +918,42 @@ gopkg.in/ini.v1 # gopkg.in/yaml.v2 v2.2.5 gopkg.in/yaml.v2 # honnef.co/go/tools v0.0.1-2019.2.3 +honnef.co/go/tools/arg honnef.co/go/tools/cmd/staticcheck -honnef.co/go/tools/lint -honnef.co/go/tools/lint/lintutil -honnef.co/go/tools/simple -honnef.co/go/tools/staticcheck -honnef.co/go/tools/stylecheck -honnef.co/go/tools/unused honnef.co/go/tools/config +honnef.co/go/tools/deprecated honnef.co/go/tools/facts +honnef.co/go/tools/functions +honnef.co/go/tools/go/types/typeutil honnef.co/go/tools/internal/cache -honnef.co/go/tools/loader -honnef.co/go/tools/lint/lintutil/format -honnef.co/go/tools/version -honnef.co/go/tools/arg honnef.co/go/tools/internal/passes/buildssa +honnef.co/go/tools/internal/renameio honnef.co/go/tools/internal/sharedcheck +honnef.co/go/tools/lint honnef.co/go/tools/lint/lintdsl -honnef.co/go/tools/deprecated -honnef.co/go/tools/functions +honnef.co/go/tools/lint/lintutil +honnef.co/go/tools/lint/lintutil/format +honnef.co/go/tools/loader honnef.co/go/tools/printf +honnef.co/go/tools/simple honnef.co/go/tools/ssa honnef.co/go/tools/ssautil +honnef.co/go/tools/staticcheck honnef.co/go/tools/staticcheck/vrp -honnef.co/go/tools/go/types/typeutil -honnef.co/go/tools/internal/renameio +honnef.co/go/tools/stylecheck +honnef.co/go/tools/unused +honnef.co/go/tools/version # k8s.io/api v0.0.0-20191115095533-47f6de673b26 -k8s.io/api/core/v1 -k8s.io/api/extensions/v1beta1 -k8s.io/api/apps/v1beta1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 -k8s.io/api/autoscaling/v1 +k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/auditregistration/v1alpha1 k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 +k8s.io/api/autoscaling/v1 k8s.io/api/autoscaling/v2beta1 
k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 @@ -966,12 +962,14 @@ k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 -k8s.io/api/policy/v1beta1 +k8s.io/api/core/v1 k8s.io/api/events/v1beta1 +k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1alpha1 k8s.io/api/node/v1beta1 +k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 @@ -983,52 +981,49 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 # k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2 -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/pkg/api/errors +k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource -k8s.io/apimachinery/pkg/runtime/schema -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/conversion/queryparams +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/clock +k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/errors 
+k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/api/meta -k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/clock -k8s.io/apimachinery/pkg/util/diff -k8s.io/apimachinery/pkg/util/wait -k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/validation -k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/util/validation/field -k8s.io/apimachinery/pkg/apis/meta/internalversion -k8s.io/apimachinery/pkg/runtime/serializer/json -k8s.io/apimachinery/pkg/runtime/serializer/protobuf -k8s.io/apimachinery/pkg/runtime/serializer/recognizer -k8s.io/apimachinery/pkg/runtime/serializer/versioning -k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.0.0-20190620085101-78d2af792bab -k8s.io/client-go/kubernetes -k8s.io/client-go/rest -k8s.io/client-go/tools/cache -k8s.io/client-go/tools/metrics -k8s.io/client-go/util/workqueue k8s.io/client-go/discovery +k8s.io/client-go/kubernetes +k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -1065,28 +1060,31 @@ k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 
k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/util/flowcontrol +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec +k8s.io/client-go/rest k8s.io/client-go/rest/watch +k8s.io/client-go/tools/cache k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/transport -k8s.io/client-go/util/cert +k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager -k8s.io/client-go/util/retry -k8s.io/client-go/kubernetes/scheme k8s.io/client-go/tools/reference -k8s.io/client-go/pkg/apis/clientauthentication -k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 -k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/transport +k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry +k8s.io/client-go/util/workqueue # k8s.io/klog v1.0.0 k8s.io/klog # k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 k8s.io/utils/buffer -k8s.io/utils/trace k8s.io/utils/integer +k8s.io/utils/trace # rsc.io/binaryregexp v0.2.0 rsc.io/binaryregexp rsc.io/binaryregexp/syntax From 4ba8e8ce07ffd0ead4f0b4e23297781ac573e134 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Wed, 5 Feb 2020 11:30:23 -0500 Subject: [PATCH 09/20] update build image to quay official repo. 
Signed-off-by: Cyril Tovena --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ea08a1e0dd..32d6cd6220 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 # https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/ defaults: &defaults docker: - - image: ctovena/cortex-build:update-gogoproto-2294034c + - image: quay.io/cortexproject/cortex:update-gogoproto-6e528d554 working_directory: /go/src/github.com/cortexproject/cortex workflows: From 3a066f5952ee20d8229e98b1d8b490a44a52420c Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 6 Feb 2020 17:35:59 -0500 Subject: [PATCH 10/20] Remove changelog entry. Signed-off-by: Cyril Tovena --- docs/configuration/config-file-reference.md | 227 -------------------- 1 file changed, 227 deletions(-) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 8a501dbc1e..fabbb8902c 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -86,14 +86,11 @@ Supported contents and default values of the config file: # and used by the 'configs' service to expose APIs to manage them. [configdb: ] -<<<<<<< HEAD -======= # The configstore_config configures the config database storing rules and # alerts, and is used by the Cortex alertmanager. # The CLI flags prefix for this block config is: alertmanager [config_store: ] ->>>>>>> Removes proto compat go tests as everything looks good. # The alertmanager_config configures the Cortex alertmanager. [alertmanager: ] @@ -105,12 +102,6 @@ runtime_config: # File with the configuration that can be updated in runtime. # CLI flag: -runtime-config.file [file: | default = ""] -<<<<<<< HEAD - -# The memberlist_config configures the Gossip memberlist. -[memberlist: ] -======= ->>>>>>> Removes proto compat go tests as everything looks good. 
``` ## `server_config` @@ -174,34 +165,6 @@ The `server_config` configures the HTTP and gRPC server of the launched service( # CLI flag: -server.grpc-max-concurrent-streams [grpc_server_max_concurrent_streams: | default = 100] -<<<<<<< HEAD -# The duration after which an idle connection should be closed. Default: -# infinity -# CLI flag: -server.grpc.keepalive.max-connection-idle -[grpc_server_max_connection_idle: | default = 2562047h47m16.854775807s] - -# The duration for the maximum amount of time a connection may exist before it -# will be closed. Default: infinity -# CLI flag: -server.grpc.keepalive.max-connection-age -[grpc_server_max_connection_age: | default = 2562047h47m16.854775807s] - -# An additive period after max-connection-age after which the connection will be -# forcibly closed. Default: infinity -# CLI flag: -server.grpc.keepalive.max-connection-age-grace -[grpc_server_max_connection_age_grace: | default = 2562047h47m16.854775807s] - -# Duration after which a keepalive probe is sent in case of no activity over the -# connection., Default: 2h -# CLI flag: -server.grpc.keepalive.time -[grpc_server_keepalive_time: | default = 2h0m0s] - -# After having pinged for keepalive check, the duration after which an idle -# connection should be closed, Default: 20s -# CLI flag: -server.grpc.keepalive.timeout -[grpc_server_keepalive_timeout: | default = 20s] - -======= ->>>>>>> Removes proto compat go tests as everything looks good. # Only log messages with the given severity or above. Valid levels: [debug, # info, warn, error] # CLI flag: -log.level @@ -283,13 +246,10 @@ ha_tracker: # The CLI flags prefix for this block config is: distributor.ha-tracker [etcd: ] -<<<<<<< HEAD -======= # The memberlist_config configures the Gossip memberlist. # The CLI flags prefix for this block config is: distributor.ha-tracker [memberlist: ] ->>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. 
# CLI flag: -distributor.ha-tracker.multi.primary @@ -343,13 +303,10 @@ ring: # The CLI flags prefix for this block config is: distributor.ring [etcd: ] -<<<<<<< HEAD -======= # The memberlist_config configures the Gossip memberlist. # The CLI flags prefix for this block config is: distributor.ring [memberlist: ] ->>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. # CLI flag: -distributor.ring.multi.primary @@ -421,12 +378,9 @@ lifecycler: # The etcd_config configures the etcd client. [etcd: ] -<<<<<<< HEAD -======= # The memberlist_config configures the Gossip memberlist. [memberlist: ] ->>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. # CLI flag: -multi.primary @@ -633,11 +587,7 @@ results_cache: # The default validity of entries for caches unless overridden. # CLI flag: -frontend.default-validity -<<<<<<< HEAD - [default_validity: | default = 0s] -======= [defaul_validity: | default = 0s] ->>>>>>> Removes proto compat go tests as everything looks good. background: # How many goroutines to use to write back to memcache. @@ -686,16 +636,10 @@ results_cache: The `ruler_config` configures the Cortex ruler. ```yaml -<<<<<<< HEAD -# URL of alerts return path. -# CLI flag: -ruler.external.url -[externalurl: | default = ] -======= externalurl: # URL of alerts return path. # CLI flag: -ruler.external.url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. # How frequently to evaluate rules # CLI flag: -ruler.evaluation-interval @@ -719,16 +663,10 @@ storeconfig: # CLI flag: -ruler.rule-path [rulepath: | default = "/rules"] -<<<<<<< HEAD -# URL of the Alertmanager to send notifications to. -# CLI flag: -ruler.alertmanager-url -[alertmanagerurl: | default = ] -======= alertmanagerurl: # URL of the Alertmanager to send notifications to. 
# CLI flag: -ruler.alertmanager-url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. # Use DNS SRV records to discover alertmanager hosts. # CLI flag: -ruler.alertmanager-discovery @@ -777,13 +715,10 @@ ring: # The CLI flags prefix for this block config is: ruler.ring [etcd: ] -<<<<<<< HEAD -======= # The memberlist_config configures the Gossip memberlist. # The CLI flags prefix for this block config is: ruler.ring [memberlist: ] ->>>>>>> Removes proto compat go tests as everything looks good. multi: # Primary backend storage used by multi-client. # CLI flag: -ruler.ring.multi.primary @@ -817,13 +752,6 @@ ring: # Period with which to attempt to flush rule groups. # CLI flag: -ruler.flush-period [flushcheckperiod: | default = 1m0s] -<<<<<<< HEAD - -# Enable the ruler api -# CLI flag: -experimental.ruler.enable-api -[enable_api: | default = false] -======= ->>>>>>> Removes proto compat go tests as everything looks good. ``` ## `alertmanager_config` @@ -839,15 +767,6 @@ The `alertmanager_config` configures the Cortex alertmanager. # CLI flag: -alertmanager.storage.retention [retention: | default = 120h0m0s] -<<<<<<< HEAD -# The URL under which Alertmanager is externally reachable (for example, if -# Alertmanager is served via a reverse proxy). Used for generating relative and -# absolute links back to Alertmanager itself. If the URL has a path portion, it -# will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, -# relevant URL components will be derived automatically. -# CLI flag: -alertmanager.web.external-url -[externalurl: | default = ] -======= externalurl: # The URL under which Alertmanager is externally reachable (for example, if # Alertmanager is served via a reverse proxy). Used for generating relative @@ -857,7 +776,6 @@ externalurl: # automatically. # CLI flag: -alertmanager.web.external-url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. 
# How frequently to poll Cortex configs # CLI flag: -alertmanager.configs.poll-interval @@ -886,25 +804,6 @@ externalurl: # Root of URL to generate if config is http://internal.monitor # CLI flag: -alertmanager.configs.auto-webhook-root [autowebhookroot: | default = ""] -<<<<<<< HEAD - -store: - # Type of backend to use to store alertmanager configs. Supported values are: - # "configdb", "local". - # CLI flag: -alertmanager.storage.type - [type: | default = "configdb"] - - # The configstore_config configures the config database storing rules and - # alerts, and is used by the Cortex alertmanager. - # The CLI flags prefix for this block config is: alertmanager - [configdb: ] - - local: - # Path at which alertmanager configurations are stored. - # CLI flag: -alertmanager.storage.local.path - [path: | default = ""] -======= ->>>>>>> Removes proto compat go tests as everything looks good. ``` ## `table_manager_config` @@ -1249,20 +1148,12 @@ The `storage_config` configures where Cortex stores the data (chunks storage eng aws: dynamodbconfig: -<<<<<<< HEAD - # DynamoDB endpoint URL with escaped Key and Secret encoded. If only region - # is specified as a host, proper endpoint will be deduced. Use - # inmemory:/// to use a mock in-memory implementation. - # CLI flag: -dynamodb.url - [dynamodb: | default = ] -======= dynamodb: # DynamoDB endpoint URL with escaped Key and Secret encoded. If only # region is specified as a host, proper endpoint will be deduced. Use # inmemory:/// to use a mock in-memory implementation. # CLI flag: -dynamodb.url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. # DynamoDB table management requests per second limit. # CLI flag: -dynamodb.api-limit @@ -1272,16 +1163,10 @@ aws: # CLI flag: -dynamodb.throttle-limit [throttlelimit: | default = 10] -<<<<<<< HEAD - # ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded. 
- # CLI flag: -applicationautoscaling.url - [applicationautoscaling: | default = ] -======= applicationautoscaling: # ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded. # CLI flag: -applicationautoscaling.url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. metrics: # Use metrics-based autoscaling, via this query URL @@ -1329,20 +1214,12 @@ aws: # CLI flag: -dynamodb.chunk.get.max.parallelism [chunkgetmaxparallelism: | default = 32] -<<<<<<< HEAD - # S3 endpoint URL with escaped Key and Secret encoded. If only region is - # specified as a host, proper endpoint will be deduced. Use - # inmemory:/// to use a mock in-memory implementation. - # CLI flag: -s3.url - [s3: | default = ] -======= s3: # S3 endpoint URL with escaped Key and Secret encoded. If only region is # specified as a host, proper endpoint will be deduced. Use # inmemory:/// to use a mock in-memory implementation. # CLI flag: -s3.url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. # Comma separated list of bucket names to evenly distribute chunks over. # Overrides any buckets specified in s3.url flag @@ -1515,24 +1392,6 @@ cassandra: # CLI flag: -cassandra.password [password: | default = ""] -<<<<<<< HEAD - # File containing password to use when connecting to cassandra. - # CLI flag: -cassandra.password-file - [password_file: | default = ""] - - # If set, when authenticating with cassandra a custom authenticator will be - # expected during the handshake. This flag can be set multiple times. - # CLI flag: -cassandra.custom-authenticator - [custom_authenticators: | default = ] - - # Timeout when connecting to cassandra. - # CLI flag: -cassandra.timeout - [timeout: | default = 2s] - - # Initial connection timeout, used during initial dial to server. - # CLI flag: -cassandra.connect-timeout - [connect_timeout: | default = 5s] -======= # Timeout when connecting to cassandra. 
# CLI flag: -cassandra.timeout [timeout: | default = 600ms] @@ -1540,7 +1399,6 @@ cassandra: # Initial connection timeout, used during initial dial to server. # CLI flag: -cassandra.connect-timeout [connect_timeout: | default = 600ms] ->>>>>>> Removes proto compat go tests as everything looks good. # Number of retries to perform on a request. (Default is 0: no retries) # CLI flag: -cassandra.max-retries @@ -1577,11 +1435,7 @@ index_queries_cache_config: # Cache config for index entry reading. The default validity of entries for # caches unless overridden. # CLI flag: -store.index-cache-read.default-validity -<<<<<<< HEAD - [default_validity: | default = 0s] -======= [defaul_validity: | default = 0s] ->>>>>>> Removes proto compat go tests as everything looks good. background: # Cache config for index entry reading. How many goroutines to use to write @@ -1626,11 +1480,7 @@ chunk_cache_config: # Cache config for chunks. The default validity of entries for caches unless # overridden. # CLI flag: -default-validity -<<<<<<< HEAD - [default_validity: | default = 0s] -======= [defaul_validity: | default = 0s] ->>>>>>> Removes proto compat go tests as everything looks good. background: # Cache config for chunks. How many goroutines to use to write back to @@ -1665,11 +1515,7 @@ write_dedupe_cache_config: # Cache config for index entry writing. The default validity of entries for # caches unless overridden. # CLI flag: -store.index-cache-write.default-validity -<<<<<<< HEAD - [default_validity: | default = 0s] -======= [defaul_validity: | default = 0s] ->>>>>>> Removes proto compat go tests as everything looks good. background: # Cache config for index entry writing. How many goroutines to use to write @@ -1864,61 +1710,11 @@ The `memberlist_config` configures the Gossip memberlist. ```yaml # Name of the node in memberlist cluster. Defaults to hostname. 
-<<<<<<< HEAD -# CLI flag: -memberlist.nodename -======= # CLI flag: -.memberlist.nodename ->>>>>>> Removes proto compat go tests as everything looks good. [node_name: | default = ""] # The timeout for establishing a connection with a remote node, and for # read/write operations. Uses memberlist LAN defaults if 0. -<<<<<<< HEAD -# CLI flag: -memberlist.stream-timeout -[stream_timeout: | default = 0s] - -# Multiplication factor used when sending out messages (factor * log(N+1)). -# CLI flag: -memberlist.retransmit-factor -[retransmit_factor: | default = 0] - -# How often to use pull/push sync. Uses memberlist LAN defaults if 0. -# CLI flag: -memberlist.pullpush-interval -[pull_push_interval: | default = 0s] - -# How often to gossip. Uses memberlist LAN defaults if 0. -# CLI flag: -memberlist.gossip-interval -[gossip_interval: | default = 0s] - -# How many nodes to gossip to. Uses memberlist LAN defaults if 0. -# CLI flag: -memberlist.gossip-nodes -[gossip_nodes: | default = 0] - -# How long to keep gossiping to dead nodes, to give them chance to refute their -# death. Uses memberlist LAN defaults if 0. -# CLI flag: -memberlist.gossip-to-dead-nodes-time -[gossip_to_dead_nodes_time: | default = 0s] - -# How soon can dead node's name be reclaimed with new address. Defaults to 0, -# which is disabled. -# CLI flag: -memberlist.dead-node-reclaim-time -[dead_node_reclaim_time: | default = 0s] - -# Other cluster members to join. Can be specified multiple times. Memberlist -# store is EXPERIMENTAL. -# CLI flag: -memberlist.join -[join_members: | default = ] - -# If this node fails to join memberlist cluster, abort. -# CLI flag: -memberlist.abort-if-join-fails -[abort_if_cluster_join_fails: | default = true] - -# How long to keep LEFT ingesters in the ring. -# CLI flag: -memberlist.left-ingesters-timeout -[left_ingesters_timeout: | default = 5m0s] - -# Timeout for leaving memberlist cluster. 
-# CLI flag: -memberlist.leave-timeout -======= # CLI flag: -.memberlist.stream-timeout [stream_timeout: | default = 0s] @@ -1953,26 +1749,10 @@ The `memberlist_config` configures the Gossip memberlist. # Timeout for leaving memberlist cluster. # CLI flag: -.memberlist.leave-timeout ->>>>>>> Removes proto compat go tests as everything looks good. [leave_timeout: | default = 5s] # IP address to listen on for gossip messages. Multiple addresses may be # specified. Defaults to 0.0.0.0 -<<<<<<< HEAD -# CLI flag: -memberlist.bind-addr -[bind_addr: | default = ] - -# Port to listen on for gossip messages. -# CLI flag: -memberlist.bind-port -[bind_port: | default = 7946] - -# Timeout used when connecting to other nodes to send packet. -# CLI flag: -memberlist.packet-dial-timeout -[packet_dial_timeout: | default = 5s] - -# Timeout for writing 'packet' data. -# CLI flag: -memberlist.packet-write-timeout -======= # CLI flag: -.memberlist.bind-addr [bind_addr: | default = ] @@ -1986,7 +1766,6 @@ The `memberlist_config` configures the Gossip memberlist. # Timeout for writing 'packet' data. # CLI flag: -.memberlist.packet-write-timeout ->>>>>>> Removes proto compat go tests as everything looks good. [packet_write_timeout: | default = 5s] ``` @@ -2241,16 +2020,10 @@ The `configdb_config` configures the config database storing rules and alerts, a The `configstore_config` configures the config database storing rules and alerts, and is used by the Cortex alertmanager. ```yaml -<<<<<<< HEAD -# URL of configs API server. -# CLI flag: -.configs.url -[configsapiurl: | default = ] -======= configsapiurl: # URL of configs API server. # CLI flag: -.configs.url [url: | default = ] ->>>>>>> Removes proto compat go tests as everything looks good. # Timeout for requests to Weave Cloud configs service. 
# CLI flag: -.configs.client-timeout From eb11bb6f92a5c31e2b849bc6bfccdfa1de88887b Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 6 Feb 2020 17:43:14 -0500 Subject: [PATCH 11/20] make doc, rebase, go mod update Signed-off-by: Cyril Tovena --- docs/configuration/config-file-reference.md | 33 ++++++++++++- go.mod | 4 +- go.sum | 4 ++ integration-tests/test-backcompat.sh | 34 -------------- .../weaveworks/common/server/server.go | 29 ++++++++++-- vendor/modules.txt | 47 ++----------------- 6 files changed, 65 insertions(+), 86 deletions(-) delete mode 100755 integration-tests/test-backcompat.sh diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index fabbb8902c..3faf24c5b9 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -165,6 +165,31 @@ The `server_config` configures the HTTP and gRPC server of the launched service( # CLI flag: -server.grpc-max-concurrent-streams [grpc_server_max_concurrent_streams: | default = 100] +# The duration after which an idle connection should be closed. Default: +# infinity +# CLI flag: -server.grpc.keepalive.max-connection-idle +[grpc_server_max_connection_idle: | default = 2562047h47m16.854775807s] + +# The duration for the maximum amount of time a connection may exist before it +# will be closed. Default: infinity +# CLI flag: -server.grpc.keepalive.max-connection-age +[grpc_server_max_connection_age: | default = 2562047h47m16.854775807s] + +# An additive period after max-connection-age after which the connection will be +# forcibly closed. 
Default: infinity +# CLI flag: -server.grpc.keepalive.max-connection-age-grace +[grpc_server_max_connection_age_grace: | default = 2562047h47m16.854775807s] + +# Duration after which a keepalive probe is sent in case of no activity over the +# connection., Default: 2h +# CLI flag: -server.grpc.keepalive.time +[grpc_server_keepalive_time: | default = 2h0m0s] + +# After having pinged for keepalive check, the duration after which an idle +# connection should be closed, Default: 20s +# CLI flag: -server.grpc.keepalive.timeout +[grpc_server_keepalive_timeout: | default = 20s] + # Only log messages with the given severity or above. Valid levels: [debug, # info, warn, error] # CLI flag: -log.level @@ -752,6 +777,10 @@ ring: # Period with which to attempt to flush rule groups. # CLI flag: -ruler.flush-period [flushcheckperiod: | default = 1m0s] + +# Enable the ruler api +# CLI flag: -experimental.ruler.enable-api +[enable_api: | default = false] ``` ## `alertmanager_config` @@ -1394,11 +1423,11 @@ cassandra: # Timeout when connecting to cassandra. # CLI flag: -cassandra.timeout - [timeout: | default = 600ms] + [timeout: | default = 2s] # Initial connection timeout, used during initial dial to server. # CLI flag: -cassandra.connect-timeout - [connect_timeout: | default = 600ms] + [connect_timeout: | default = 5s] # Number of retries to perform on a request. 
(Default is 0: no retries) # CLI flag: -cassandra.max-retries diff --git a/go.mod b/go.mod index 000e3a8ce6..b7618bbacf 100644 --- a/go.mod +++ b/go.mod @@ -63,7 +63,7 @@ require ( github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect github.com/uber/jaeger-client-go v2.20.1+incompatible github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 - github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 + github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a go.etcd.io/bbolt v1.3.3 go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5 go.uber.org/atomic v1.5.0 @@ -83,5 +83,3 @@ replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110 // Override reference that causes an error from Go proxy - see https://github.com/golang/go/issues/33558 replace k8s.io/client-go => k8s.io/client-go v0.0.0-20190620085101-78d2af792bab - -replace github.com/weaveworks/common => github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 diff --git a/go.sum b/go.sum index bcad9854dc..1eea9dbeb1 100644 --- a/go.sum +++ b/go.sum @@ -776,6 +776,10 @@ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 h1:qi+YkNiB7T3Ikw1DoDIFhdAPbDU7fUPDsKrUoZdupnQ= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1/go.mod h1:7gGdEUJaCrSlWi/mjd68CZv0sfqektYPDcro9cE+M9k= +github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 h1:nhoCrldzSm1le34sZfSNyTELYxIDaAmDw6PPVoEj5Mw= +github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1/go.mod h1:KLGX4H1D0lPVfHS/hqO9yrKvDzaT0bqYftIW43iLaOc= +github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a h1:4Sm4LnEnP1yQ2NeNgGqLTuN2xrTvcBOU+EsljpB8Ed0= +github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod 
h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= diff --git a/integration-tests/test-backcompat.sh b/integration-tests/test-backcompat.sh deleted file mode 100755 index 468b75a443..0000000000 --- a/integration-tests/test-backcompat.sh +++ /dev/null @@ -1,34 +0,0 @@ -#! /bin/sh - -set -e - -BACKWARD_IMAGE=v0.6.0 - -. "$(dirname "$0")/common.sh" - -"$(dirname "$0")/start-components.sh" - -# Note we haven't created any tables in the DB yet - we aren't going to run long enough to flush anything - -echo Start first ingester, using older version -docker run $RUN_ARGS -d --name=i1 --hostname=i1 "$IMAGE_PREFIX"cortex:$BACKWARD_IMAGE -target=ingester $INGESTER_ARGS -ingester.claim-on-rollout=true - -I1_ADDR=$(container_ip i1) -wait_for "curl -s -f -m 3 $I1_ADDR/ready" "ingester ready" - -has_tokens_owned() { - curl -s -f -m 3 $1/metrics | grep -q 'cortex_ring_tokens_owned' -} -DIST_ADDR=$(container_ip distributor) -wait_for "has_tokens_owned $DIST_ADDR" "distributor to see ingester in ring" - -docker run $RUN_ARGS --rm $AVALANCHE_IMAGE --metric-count=2 --label-count=2 --series-count=2 --remote-requests-count=1 --remote-url=http://distributor/api/prom/push - -echo Start second ingester, waiting for first -docker run $RUN_ARGS -d --name=i2 --hostname=i2 "$IMAGE_PREFIX"cortex:$IMAGE_TAG -target=ingester $INGESTER_ARGS -ingester.join-after=300s -ingester.claim-on-rollout=true - -echo Stop first ingester so it should hand over -docker stop i1 - -I2_ADDR=$(container_ip i2) -wait_for "curl -s -f -m 3 $I2_ADDR/ready" "ingester ready" diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go index 14f0ecb710..748fa8f2c1 100644 --- 
a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/weaveworks/common/server/server.go @@ -3,6 +3,7 @@ package server import ( "flag" "fmt" + "math" "net" "net/http" _ "net/http/pprof" // anonymous import to get the pprof handler registered @@ -17,6 +18,7 @@ import ( "golang.org/x/net/context" "golang.org/x/net/netutil" "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" "github.com/weaveworks/common/httpgrpc" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" @@ -49,9 +51,14 @@ type Config struct { GRPCStreamMiddleware []grpc.StreamServerInterceptor `yaml:"-"` HTTPMiddleware []middleware.Interface `yaml:"-"` - GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` - GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` - GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` + GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` + GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` + GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` + GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` + GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` + GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` + GRPCServerTime time.Duration `yaml:"grpc_server_keepalive_time"` + GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"` LogLevel logging.Level `yaml:"log_level"` Log logging.Interface `yaml:"-"` @@ -59,6 +66,8 @@ type Config struct { PathPrefix string `yaml:"http_path_prefix"` } +var infinty = time.Duration(math.MaxInt64) + // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.HTTPListenAddress, "server.http-listen-address", "", "HTTP server listen address.") @@ -75,6 +84,11 @@ func (cfg *Config) 
RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.GPRCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") f.IntVar(&cfg.GRPCServerMaxSendMsgSize, "server.grpc-max-send-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can send (bytes).") f.UintVar(&cfg.GPRCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls (0 = unlimited)") + f.DurationVar(&cfg.GRPCServerMaxConnectionIdle, "server.grpc.keepalive.max-connection-idle", infinty, "The duration after which an idle connection should be closed. Default: infinity") + f.DurationVar(&cfg.GRPCServerMaxConnectionAge, "server.grpc.keepalive.max-connection-age", infinty, "The duration for the maximum amount of time a connection may exist before it will be closed. Default: infinity") + f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, "server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity") + f.DurationVar(&cfg.GRPCServerTime, "server.grpc.keepalive.time", time.Hour*2, "Duration after which a keepalive probe is sent in case of no activity over the connection., Default: 2h") + f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s") f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)") cfg.LogLevel.RegisterFlags(f) } @@ -151,6 +165,14 @@ func New(cfg Config) (*Server, error) { } grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) 
+ grpcKeepAliveOptions := keepalive.ServerParameters{ + MaxConnectionIdle: cfg.GRPCServerMaxConnectionIdle, + MaxConnectionAge: cfg.GRPCServerMaxConnectionAge, + MaxConnectionAgeGrace: cfg.GRPCServerMaxConnectionAgeGrace, + Time: cfg.GRPCServerTime, + Timeout: cfg.GRPCServerTimeout, + } + grpcOptions := []grpc.ServerOption{ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( grpcMiddleware..., @@ -158,6 +180,7 @@ func New(cfg Config) (*Server, error) { grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( grpcStreamMiddleware..., )), + grpc.KeepaliveParams(grpcKeepAliveOptions), grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize), grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize), grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)), diff --git a/vendor/modules.txt b/vendor/modules.txt index e2c866ed60..050381115d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -452,26 +452,6 @@ github.com/prometheus/prometheus/discovery/zookeeper github.com/prometheus/prometheus/notifier github.com/prometheus/prometheus/pkg/exemplar github.com/prometheus/prometheus/pkg/gate -# github.com/prometheus/prometheus v1.8.2-0.20191126064551-80ba03c67da1 -github.com/prometheus/prometheus/config -github.com/prometheus/prometheus/discovery -github.com/prometheus/prometheus/discovery/azure -github.com/prometheus/prometheus/discovery/config -github.com/prometheus/prometheus/discovery/consul -github.com/prometheus/prometheus/discovery/dns -github.com/prometheus/prometheus/discovery/ec2 -github.com/prometheus/prometheus/discovery/file -github.com/prometheus/prometheus/discovery/gce -github.com/prometheus/prometheus/discovery/kubernetes -github.com/prometheus/prometheus/discovery/marathon -github.com/prometheus/prometheus/discovery/openstack -github.com/prometheus/prometheus/discovery/refresh -github.com/prometheus/prometheus/discovery/targetgroup -github.com/prometheus/prometheus/discovery/triton -github.com/prometheus/prometheus/discovery/zookeeper 
-github.com/prometheus/prometheus/notifier -github.com/prometheus/prometheus/pkg/exemplar -github.com/prometheus/prometheus/pkg/gate github.com/prometheus/prometheus/pkg/labels github.com/prometheus/prometheus/pkg/logging github.com/prometheus/prometheus/pkg/pool @@ -538,6 +518,8 @@ github.com/spf13/pflag # github.com/stretchr/objx v0.2.0 github.com/stretchr/objx # github.com/stretchr/testify v1.4.0 +github.com/stretchr/testify/assert +github.com/stretchr/testify/mock github.com/stretchr/testify/require # github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d github.com/thanos-io/thanos/pkg/block @@ -559,23 +541,6 @@ github.com/thanos-io/thanos/pkg/objstore/gcs github.com/thanos-io/thanos/pkg/objstore/s3 github.com/thanos-io/thanos/pkg/pool github.com/thanos-io/thanos/pkg/runutil -github.com/stretchr/testify/mock -github.com/stretchr/testify/assert -github.com/stretchr/testify/mock -github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.8.1-0.20200102143048-a37ac093a67a -github.com/thanos-io/thanos/pkg/block -github.com/thanos-io/thanos/pkg/block/metadata -github.com/thanos-io/thanos/pkg/compact -github.com/thanos-io/thanos/pkg/compact/downsample -github.com/thanos-io/thanos/pkg/component -github.com/thanos-io/thanos/pkg/extprom -github.com/thanos-io/thanos/pkg/model -github.com/thanos-io/thanos/pkg/objstore -github.com/thanos-io/thanos/pkg/objstore/gcs -github.com/thanos-io/thanos/pkg/objstore/s3 -github.com/thanos-io/thanos/pkg/pool -github.com/thanos-io/thanos/pkg/runutil github.com/thanos-io/thanos/pkg/shipper github.com/thanos-io/thanos/pkg/store github.com/thanos-io/thanos/pkg/store/cache @@ -609,13 +574,7 @@ github.com/uber/jaeger-lib/metrics github.com/uber/jaeger-lib/metrics/prometheus # github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 github.com/weaveworks/billing-client -# github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 -github.com/weaveworks/common/aws -# github.com/weaveworks/common 
v0.0.0-20190822150010-afb9996716e4 => github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 -github.com/weaveworks/common/tracing -github.com/weaveworks/common/server -github.com/weaveworks/common/user -# github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 => github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 +# github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a github.com/weaveworks/common/aws github.com/weaveworks/common/errors github.com/weaveworks/common/httpgrpc From bbe92fbd48ac2b82c2c2cf5527b6fa0e31f88bf9 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 6 Feb 2020 19:57:29 -0500 Subject: [PATCH 12/20] Adds label pair marshal test Signed-off-by: Cyril Tovena --- pkg/ingester/client/timeseries.go | 3 +++ pkg/ingester/client/timeseries_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 pkg/ingester/client/timeseries_test.go diff --git a/pkg/ingester/client/timeseries.go b/pkg/ingester/client/timeseries.go index 0f8894b1a4..ba1d433350 100644 --- a/pkg/ingester/client/timeseries.go +++ b/pkg/ingester/client/timeseries.go @@ -73,6 +73,9 @@ func (bs *LabelAdapter) Marshal() ([]byte, error) { size := bs.Size() buf := make([]byte, size) n, err := bs.MarshalToSizedBuffer(buf[:size]) + if err != nil { + return nil, err + } return buf[:n], err } diff --git a/pkg/ingester/client/timeseries_test.go b/pkg/ingester/client/timeseries_test.go new file mode 100644 index 0000000000..d84a653fb4 --- /dev/null +++ b/pkg/ingester/client/timeseries_test.go @@ -0,0 +1,26 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLabelAdapter_Marshal(t *testing.T) { + tests := []struct { + bs *LabelAdapter + }{ + {&LabelAdapter{Name: "foo", Value: "bar"}}, + {&LabelAdapter{Name: "very long label name", Value: "very long label value"}}, + {&LabelAdapter{}}, + } + for _, tt := range tests { + t.Run(tt.bs.Name, func(t *testing.T) { + bytes, err := 
tt.bs.Marshal() + require.NoError(t, err) + lbs := &LabelAdapter{} + require.NoError(t, lbs.Unmarshal(bytes)) + require.EqualValues(t, tt.bs, lbs) + }) + } +} From c558afbcdffbdc7de7f565928b94061992f79e5f Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 6 Feb 2020 20:04:19 -0500 Subject: [PATCH 13/20] Rollback to my own build image. Signed-off-by: Cyril Tovena --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 32d6cd6220..ea08a1e0dd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 # https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/ defaults: &defaults docker: - - image: quay.io/cortexproject/cortex:update-gogoproto-6e528d554 + - image: ctovena/cortex-build:update-gogoproto-2294034c working_directory: /go/src/github.com/cortexproject/cortex workflows: From 8d7937ff6ec9e94e815f5c71c735e717f14465e9 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 6 Feb 2020 20:12:21 -0500 Subject: [PATCH 14/20] Update go sum Signed-off-by: Cyril Tovena --- go.sum | 4 ---- 1 file changed, 4 deletions(-) diff --git a/go.sum b/go.sum index 1eea9dbeb1..f69ed698bd 100644 --- a/go.sum +++ b/go.sum @@ -160,8 +160,6 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870 h1:FGV8BkEkS0ah3fN5DJR3VnWb2XVaVwTPiVdPM5QrfqI= -github.com/cyriltovena/common v0.0.0-20200130195811-b9d950b97870/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod 
h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= @@ -776,8 +774,6 @@ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 h1:qi+YkNiB7T3Ikw1DoDIFhdAPbDU7fUPDsKrUoZdupnQ= github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1/go.mod h1:7gGdEUJaCrSlWi/mjd68CZv0sfqektYPDcro9cE+M9k= -github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 h1:nhoCrldzSm1le34sZfSNyTELYxIDaAmDw6PPVoEj5Mw= -github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1/go.mod h1:KLGX4H1D0lPVfHS/hqO9yrKvDzaT0bqYftIW43iLaOc= github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a h1:4Sm4LnEnP1yQ2NeNgGqLTuN2xrTvcBOU+EsljpB8Ed0= github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= From 645b8477f4ee92732fd004e174d6ea46a6ee83f8 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 6 Feb 2020 20:22:29 -0500 Subject: [PATCH 15/20] Update ruler proto with 1.3 Signed-off-by: Cyril Tovena --- pkg/ruler/ruler.pb.go | 77 ++++++++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 27 deletions(-) diff --git a/pkg/ruler/ruler.pb.go b/pkg/ruler/ruler.pb.go index a70d058ece..e8580edf21 100644 --- a/pkg/ruler/ruler.pb.go +++ b/pkg/ruler/ruler.pb.go @@ -10,8 +10,11 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" io "io" math "math" + math_bits 
"math/bits" reflect "reflect" strings "strings" ) @@ -25,7 +28,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type RulesRequest struct { } @@ -43,7 +46,7 @@ func (m *RulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_RulesRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -79,7 +82,7 @@ func (m *RulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_RulesResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -249,6 +252,14 @@ type RulerServer interface { Rules(context.Context, *RulesRequest) (*RulesResponse, error) } +// UnimplementedRulerServer can be embedded to have forward compatible implementations. 
+type UnimplementedRulerServer struct { +} + +func (*UnimplementedRulerServer) Rules(ctx context.Context, req *RulesRequest) (*RulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Rules not implemented") +} + func RegisterRulerServer(s *grpc.Server, srv RulerServer) { s.RegisterService(&_Ruler_serviceDesc, srv) } @@ -287,7 +298,7 @@ var _Ruler_serviceDesc = grpc.ServiceDesc{ func (m *RulesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -295,17 +306,22 @@ func (m *RulesRequest) Marshal() (dAtA []byte, err error) { } func (m *RulesRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *RulesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -313,33 +329,42 @@ func (m *RulesResponse) Marshal() (dAtA []byte, err error) { } func (m *RulesResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RulesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintRuler(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRuler(dAtA, i, uint64(size)) } - i += n + i-- 
+ dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func encodeVarintRuler(dAtA []byte, offset int, v uint64) int { + offset -= sovRuler(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RulesRequest) Size() (n int) { if m == nil { @@ -366,14 +391,7 @@ func (m *RulesResponse) Size() (n int) { } func sovRuler(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozRuler(x uint64) (n int) { return sovRuler(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -391,8 +409,13 @@ func (this *RulesResponse) String() string { if this == nil { return "nil" } + repeatedStringForGroups := "[]*RuleGroupDesc{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(fmt.Sprintf("%v", f), "RuleGroupDesc", "rules.RuleGroupDesc", 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&RulesResponse{`, - `Groups:` + strings.Replace(fmt.Sprintf("%v", this.Groups), "RuleGroupDesc", "rules.RuleGroupDesc", 1) + `,`, + `Groups:` + repeatedStringForGroups + `,`, `}`, }, "") return s From 9d527417527b5b18357700e48717ce3793537423 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Fri, 7 Feb 2020 08:40:03 -0500 Subject: [PATCH 16/20] quay build image. 
Signed-off-by: Cyril Tovena --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ea08a1e0dd..4585924f58 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 # https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/ defaults: &defaults docker: - - image: ctovena/cortex-build:update-gogoproto-2294034c + - image: quay.io/cortexproject/build-image:update-gogoproto-9a1b26ac6 working_directory: /go/src/github.com/cortexproject/cortex workflows: From db944b6990dab51d0a5e0d8e639f6b2bef764004 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Tue, 18 Feb 2020 16:16:43 -0500 Subject: [PATCH 17/20] Update doc. Signed-off-by: Cyril Tovena --- docs/configuration/config-file-reference.md | 160 +++++++++++--------- 1 file changed, 85 insertions(+), 75 deletions(-) diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 3faf24c5b9..1df44c6fd2 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -86,11 +86,6 @@ Supported contents and default values of the config file: # and used by the 'configs' service to expose APIs to manage them. [configdb: ] -# The configstore_config configures the config database storing rules and -# alerts, and is used by the Cortex alertmanager. -# The CLI flags prefix for this block config is: alertmanager -[config_store: ] - # The alertmanager_config configures the Cortex alertmanager. [alertmanager: ] @@ -102,6 +97,9 @@ runtime_config: # File with the configuration that can be updated in runtime. # CLI flag: -runtime-config.file [file: | default = ""] + +# The memberlist_config configures the Gossip memberlist. 
+[memberlist: ] ``` ## `server_config` @@ -271,10 +269,6 @@ ha_tracker: # The CLI flags prefix for this block config is: distributor.ha-tracker [etcd: ] - # The memberlist_config configures the Gossip memberlist. - # The CLI flags prefix for this block config is: distributor.ha-tracker - [memberlist: ] - multi: # Primary backend storage used by multi-client. # CLI flag: -distributor.ha-tracker.multi.primary @@ -328,10 +322,6 @@ ring: # The CLI flags prefix for this block config is: distributor.ring [etcd: ] - # The memberlist_config configures the Gossip memberlist. - # The CLI flags prefix for this block config is: distributor.ring - [memberlist: ] - multi: # Primary backend storage used by multi-client. # CLI flag: -distributor.ring.multi.primary @@ -403,9 +393,6 @@ lifecycler: # The etcd_config configures the etcd client. [etcd: ] - # The memberlist_config configures the Gossip memberlist. - [memberlist: ] - multi: # Primary backend storage used by multi-client. # CLI flag: -multi.primary @@ -612,7 +599,7 @@ results_cache: # The default validity of entries for caches unless overridden. # CLI flag: -frontend.default-validity - [defaul_validity: | default = 0s] + [default_validity: | default = 0s] background: # How many goroutines to use to write back to memcache. @@ -661,10 +648,9 @@ results_cache: The `ruler_config` configures the Cortex ruler. ```yaml -externalurl: - # URL of alerts return path. - # CLI flag: -ruler.external.url - [url: | default = ] +# URL of alerts return path. +# CLI flag: -ruler.external.url +[externalurl: | default = ] # How frequently to evaluate rules # CLI flag: -ruler.evaluation-interval @@ -688,10 +674,9 @@ storeconfig: # CLI flag: -ruler.rule-path [rulepath: | default = "/rules"] -alertmanagerurl: - # URL of the Alertmanager to send notifications to. - # CLI flag: -ruler.alertmanager-url - [url: | default = ] +# URL of the Alertmanager to send notifications to. 
+# CLI flag: -ruler.alertmanager-url +[alertmanagerurl: | default = ] # Use DNS SRV records to discover alertmanager hosts. # CLI flag: -ruler.alertmanager-discovery @@ -740,10 +725,6 @@ ring: # The CLI flags prefix for this block config is: ruler.ring [etcd: ] - # The memberlist_config configures the Gossip memberlist. - # The CLI flags prefix for this block config is: ruler.ring - [memberlist: ] - multi: # Primary backend storage used by multi-client. # CLI flag: -ruler.ring.multi.primary @@ -796,15 +777,13 @@ The `alertmanager_config` configures the Cortex alertmanager. # CLI flag: -alertmanager.storage.retention [retention: | default = 120h0m0s] -externalurl: - # The URL under which Alertmanager is externally reachable (for example, if - # Alertmanager is served via a reverse proxy). Used for generating relative - # and absolute links back to Alertmanager itself. If the URL has a path - # portion, it will be used to prefix all HTTP endpoints served by - # Alertmanager. If omitted, relevant URL components will be derived - # automatically. - # CLI flag: -alertmanager.web.external-url - [url: | default = ] +# The URL under which Alertmanager is externally reachable (for example, if +# Alertmanager is served via a reverse proxy). Used for generating relative and +# absolute links back to Alertmanager itself. If the URL has a path portion, it +# will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, +# relevant URL components will be derived automatically. +# CLI flag: -alertmanager.web.external-url +[externalurl: | default = ] # How frequently to poll Cortex configs # CLI flag: -alertmanager.configs.poll-interval @@ -833,6 +812,22 @@ externalurl: # Root of URL to generate if config is http://internal.monitor # CLI flag: -alertmanager.configs.auto-webhook-root [autowebhookroot: | default = ""] + +store: + # Type of backend to use to store alertmanager configs. Supported values are: + # "configdb", "local". 
+ # CLI flag: -alertmanager.storage.type + [type: | default = "configdb"] + + # The configstore_config configures the config database storing rules and + # alerts, and is used by the Cortex alertmanager. + # The CLI flags prefix for this block config is: alertmanager + [configdb: ] + + local: + # Path at which alertmanager configurations are stored. + # CLI flag: -alertmanager.storage.local.path + [path: | default = ""] ``` ## `table_manager_config` @@ -1177,12 +1172,11 @@ The `storage_config` configures where Cortex stores the data (chunks storage eng aws: dynamodbconfig: - dynamodb: - # DynamoDB endpoint URL with escaped Key and Secret encoded. If only - # region is specified as a host, proper endpoint will be deduced. Use - # inmemory:/// to use a mock in-memory implementation. - # CLI flag: -dynamodb.url - [url: | default = ] + # DynamoDB endpoint URL with escaped Key and Secret encoded. If only region + # is specified as a host, proper endpoint will be deduced. Use + # inmemory:/// to use a mock in-memory implementation. + # CLI flag: -dynamodb.url + [dynamodb: | default = ] # DynamoDB table management requests per second limit. # CLI flag: -dynamodb.api-limit @@ -1192,10 +1186,9 @@ aws: # CLI flag: -dynamodb.throttle-limit [throttlelimit: | default = 10] - applicationautoscaling: - # ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded. - # CLI flag: -applicationautoscaling.url - [url: | default = ] + # ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded. + # CLI flag: -applicationautoscaling.url + [applicationautoscaling: | default = ] metrics: # Use metrics-based autoscaling, via this query URL @@ -1243,12 +1236,11 @@ aws: # CLI flag: -dynamodb.chunk.get.max.parallelism [chunkgetmaxparallelism: | default = 32] - s3: - # S3 endpoint URL with escaped Key and Secret encoded. If only region is - # specified as a host, proper endpoint will be deduced. Use - # inmemory:/// to use a mock in-memory implementation. 
- # CLI flag: -s3.url - [url: | default = ] + # S3 endpoint URL with escaped Key and Secret encoded. If only region is + # specified as a host, proper endpoint will be deduced. Use + # inmemory:/// to use a mock in-memory implementation. + # CLI flag: -s3.url + [s3: | default = ] # Comma separated list of bucket names to evenly distribute chunks over. # Overrides any buckets specified in s3.url flag @@ -1421,6 +1413,15 @@ cassandra: # CLI flag: -cassandra.password [password: | default = ""] + # File containing password to use when connecting to cassandra. + # CLI flag: -cassandra.password-file + [password_file: | default = ""] + + # If set, when authenticating with cassandra a custom authenticator will be + # expected during the handshake. This flag can be set multiple times. + # CLI flag: -cassandra.custom-authenticator + [custom_authenticators: | default = ] + # Timeout when connecting to cassandra. # CLI flag: -cassandra.timeout [timeout: | default = 2s] @@ -1464,7 +1465,7 @@ index_queries_cache_config: # Cache config for index entry reading. The default validity of entries for # caches unless overridden. # CLI flag: -store.index-cache-read.default-validity - [defaul_validity: | default = 0s] + [default_validity: | default = 0s] background: # Cache config for index entry reading. How many goroutines to use to write @@ -1509,7 +1510,7 @@ chunk_cache_config: # Cache config for chunks. The default validity of entries for caches unless # overridden. # CLI flag: -default-validity - [defaul_validity: | default = 0s] + [default_validity: | default = 0s] background: # Cache config for chunks. How many goroutines to use to write back to @@ -1544,7 +1545,7 @@ write_dedupe_cache_config: # Cache config for index entry writing. The default validity of entries for # caches unless overridden. # CLI flag: -store.index-cache-write.default-validity - [defaul_validity: | default = 0s] + [default_validity: | default = 0s] background: # Cache config for index entry writing. 
How many goroutines to use to write @@ -1739,62 +1740,72 @@ The `memberlist_config` configures the Gossip memberlist. ```yaml # Name of the node in memberlist cluster. Defaults to hostname. -# CLI flag: -.memberlist.nodename +# CLI flag: -memberlist.nodename [node_name: | default = ""] # The timeout for establishing a connection with a remote node, and for # read/write operations. Uses memberlist LAN defaults if 0. -# CLI flag: -.memberlist.stream-timeout +# CLI flag: -memberlist.stream-timeout [stream_timeout: | default = 0s] # Multiplication factor used when sending out messages (factor * log(N+1)). -# CLI flag: -.memberlist.retransmit-factor +# CLI flag: -memberlist.retransmit-factor [retransmit_factor: | default = 0] # How often to use pull/push sync. Uses memberlist LAN defaults if 0. -# CLI flag: -.memberlist.pullpush-interval +# CLI flag: -memberlist.pullpush-interval [pull_push_interval: | default = 0s] # How often to gossip. Uses memberlist LAN defaults if 0. -# CLI flag: -.memberlist.gossip-interval +# CLI flag: -memberlist.gossip-interval [gossip_interval: | default = 0s] # How many nodes to gossip to. Uses memberlist LAN defaults if 0. -# CLI flag: -.memberlist.gossip-nodes +# CLI flag: -memberlist.gossip-nodes [gossip_nodes: | default = 0] +# How long to keep gossiping to dead nodes, to give them chance to refute their +# death. Uses memberlist LAN defaults if 0. +# CLI flag: -memberlist.gossip-to-dead-nodes-time +[gossip_to_dead_nodes_time: | default = 0s] + +# How soon can dead node's name be reclaimed with new address. Defaults to 0, +# which is disabled. +# CLI flag: -memberlist.dead-node-reclaim-time +[dead_node_reclaim_time: | default = 0s] + # Other cluster members to join. Can be specified multiple times. Memberlist # store is EXPERIMENTAL. -# CLI flag: -.memberlist.join +# CLI flag: -memberlist.join [join_members: | default = ] # If this node fails to join memberlist cluster, abort. 
-# CLI flag: -.memberlist.abort-if-join-fails +# CLI flag: -memberlist.abort-if-join-fails [abort_if_cluster_join_fails: | default = true] # How long to keep LEFT ingesters in the ring. -# CLI flag: -.memberlist.left-ingesters-timeout +# CLI flag: -memberlist.left-ingesters-timeout [left_ingesters_timeout: | default = 5m0s] # Timeout for leaving memberlist cluster. -# CLI flag: -.memberlist.leave-timeout +# CLI flag: -memberlist.leave-timeout [leave_timeout: | default = 5s] # IP address to listen on for gossip messages. Multiple addresses may be # specified. Defaults to 0.0.0.0 -# CLI flag: -.memberlist.bind-addr +# CLI flag: -memberlist.bind-addr [bind_addr: | default = ] # Port to listen on for gossip messages. -# CLI flag: -.memberlist.bind-port +# CLI flag: -memberlist.bind-port [bind_port: | default = 7946] # Timeout used when connecting to other nodes to send packet. -# CLI flag: -.memberlist.packet-dial-timeout +# CLI flag: -memberlist.packet-dial-timeout [packet_dial_timeout: | default = 5s] # Timeout for writing 'packet' data. -# CLI flag: -.memberlist.packet-write-timeout +# CLI flag: -memberlist.packet-write-timeout [packet_write_timeout: | default = 5s] ``` @@ -2049,10 +2060,9 @@ The `configdb_config` configures the config database storing rules and alerts, a The `configstore_config` configures the config database storing rules and alerts, and is used by the Cortex alertmanager. ```yaml -configsapiurl: - # URL of configs API server. - # CLI flag: -.configs.url - [url: | default = ] +# URL of configs API server. +# CLI flag: -.configs.url +[configsapiurl: | default = ] # Timeout for requests to Weave Cloud configs service. # CLI flag: -.configs.client-timeout From fbd92c1b2fabf76eaa52370e4bd82993d9bd9f0c Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Tue, 18 Feb 2020 16:21:47 -0500 Subject: [PATCH 18/20] Regenerate new proto. 
Signed-off-by: Cyril Tovena --- pkg/alertmanager/alerts/alerts.pb.go | 111 +++++++++++++++------------ pkg/ring/kv/memberlist/kv.pb.go | 96 +++++++++++++---------- 2 files changed, 120 insertions(+), 87 deletions(-) diff --git a/pkg/alertmanager/alerts/alerts.pb.go b/pkg/alertmanager/alerts/alerts.pb.go index 016c42a416..3c4d815b8a 100644 --- a/pkg/alertmanager/alerts/alerts.pb.go +++ b/pkg/alertmanager/alerts/alerts.pb.go @@ -9,6 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" ) @@ -22,7 +23,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type AlertConfigDesc struct { User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` @@ -43,7 +44,7 @@ func (m *AlertConfigDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_AlertConfigDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -101,7 +102,7 @@ func (m *TemplateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_TemplateDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -259,7 +260,7 @@ func valueToGoStringAlerts(v interface{}, typ string) string { func (m *AlertConfigDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -267,41 +268,50 @@ func (m *AlertConfigDesc) Marshal() (dAtA []byte, 
err error) { } func (m *AlertConfigDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlertConfigDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.User) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAlerts(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) + if len(m.Templates) > 0 { + for iNdEx := len(m.Templates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Templates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAlerts(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } if len(m.RawConfig) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.RawConfig) + copy(dAtA[i:], m.RawConfig) i = encodeVarintAlerts(dAtA, i, uint64(len(m.RawConfig))) - i += copy(dAtA[i:], m.RawConfig) + i-- + dAtA[i] = 0x12 } - if len(m.Templates) > 0 { - for _, msg := range m.Templates { - dAtA[i] = 0x1a - i++ - i = encodeVarintAlerts(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintAlerts(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TemplateDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -309,33 +319,42 @@ func (m *TemplateDesc) Marshal() (dAtA []byte, err error) { } func (m *TemplateDesc) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Filename) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAlerts(dAtA, i, uint64(len(m.Filename))) - i 
+= copy(dAtA[i:], m.Filename) - } if len(m.Body) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Body) + copy(dAtA[i:], m.Body) i = encodeVarintAlerts(dAtA, i, uint64(len(m.Body))) - i += copy(dAtA[i:], m.Body) + i-- + dAtA[i] = 0x12 + } + if len(m.Filename) > 0 { + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) + i = encodeVarintAlerts(dAtA, i, uint64(len(m.Filename))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintAlerts(dAtA []byte, offset int, v uint64) int { + offset -= sovAlerts(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AlertConfigDesc) Size() (n int) { if m == nil { @@ -378,14 +397,7 @@ func (m *TemplateDesc) Size() (n int) { } func sovAlerts(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozAlerts(x uint64) (n int) { return sovAlerts(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -394,10 +406,15 @@ func (this *AlertConfigDesc) String() string { if this == nil { return "nil" } + repeatedStringForTemplates := "[]*TemplateDesc{" + for _, f := range this.Templates { + repeatedStringForTemplates += strings.Replace(f.String(), "TemplateDesc", "TemplateDesc", 1) + "," + } + repeatedStringForTemplates += "}" s := strings.Join([]string{`&AlertConfigDesc{`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `RawConfig:` + fmt.Sprintf("%v", this.RawConfig) + `,`, - `Templates:` + strings.Replace(fmt.Sprintf("%v", this.Templates), "TemplateDesc", "TemplateDesc", 1) + `,`, + `Templates:` + repeatedStringForTemplates + `,`, `}`, }, "") return s diff --git a/pkg/ring/kv/memberlist/kv.pb.go b/pkg/ring/kv/memberlist/kv.pb.go index 610a2f40f5..4c2eb9265b 100644 --- a/pkg/ring/kv/memberlist/kv.pb.go +++ b/pkg/ring/kv/memberlist/kv.pb.go @@ -10,6 +10,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" 
reflect "reflect" strings "strings" ) @@ -23,7 +24,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // KV Store is just a series of key-value pairs. type KeyValueStore struct { @@ -43,7 +44,7 @@ func (m *KeyValueStore) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_KeyValueStore.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -90,7 +91,7 @@ func (m *KeyValuePair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_KeyValuePair.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -250,7 +251,7 @@ func valueToGoStringKv(v interface{}, typ string) string { func (m *KeyValueStore) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -258,29 +259,36 @@ func (m *KeyValueStore) Marshal() (dAtA []byte, err error) { } func (m *KeyValueStore) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyValueStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Pairs) > 0 { - for _, msg := range m.Pairs { - dAtA[i] = 0xa - i++ - i = encodeVarintKv(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintKv(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *KeyValuePair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -288,39 +296,49 @@ func (m *KeyValuePair) Marshal() (dAtA []byte, err error) { } func (m *KeyValuePair) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyValuePair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) + if len(m.Codec) > 0 { + i -= len(m.Codec) + copy(dAtA[i:], m.Codec) + i = encodeVarintKv(dAtA, i, uint64(len(m.Codec))) + i-- + dAtA[i] = 0x1a } if len(m.Value) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Value) + copy(dAtA[i:], m.Value) i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) + i-- + dAtA[i] = 0x12 } - if len(m.Codec) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Codec))) - i += copy(dAtA[i:], m.Codec) + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + offset -= sovKv(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *KeyValueStore) Size() (n int) { if m == nil { @@ -359,14 +377,7 @@ func (m *KeyValuePair) Size() (n int) { } func sovKv(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return 
n + return (math_bits.Len64(x|1) + 6) / 7 } func sozKv(x uint64) (n int) { return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -375,8 +386,13 @@ func (this *KeyValueStore) String() string { if this == nil { return "nil" } + repeatedStringForPairs := "[]*KeyValuePair{" + for _, f := range this.Pairs { + repeatedStringForPairs += strings.Replace(f.String(), "KeyValuePair", "KeyValuePair", 1) + "," + } + repeatedStringForPairs += "}" s := strings.Join([]string{`&KeyValueStore{`, - `Pairs:` + strings.Replace(fmt.Sprintf("%v", this.Pairs), "KeyValuePair", "KeyValuePair", 1) + `,`, + `Pairs:` + repeatedStringForPairs + `,`, `}`, }, "") return s From b1b5f88c522caabdfd43e94b26996d6708b44503 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 20 Feb 2020 08:19:48 -0500 Subject: [PATCH 19/20] Add empty name tests for LabelAdapter Unmarshal. Signed-off-by: Cyril Tovena --- pkg/ingester/client/timeseries_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/ingester/client/timeseries_test.go b/pkg/ingester/client/timeseries_test.go index d84a653fb4..c0b2513584 100644 --- a/pkg/ingester/client/timeseries_test.go +++ b/pkg/ingester/client/timeseries_test.go @@ -12,6 +12,7 @@ func TestLabelAdapter_Marshal(t *testing.T) { }{ {&LabelAdapter{Name: "foo", Value: "bar"}}, {&LabelAdapter{Name: "very long label name", Value: "very long label value"}}, + {&LabelAdapter{Name: "", Value: "foo"}}, {&LabelAdapter{}}, } for _, tt := range tests { From 63a87b265d577975c6d762933771caf003f96887 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 20 Feb 2020 08:24:31 -0500 Subject: [PATCH 20/20] Remove changelog item for this PR Signed-off-by: Cyril Tovena --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79b9714bc7..9df155d04c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,6 @@ ## master / unreleased -* [CHANGE] Upgraded gogo/protobuf tool chain from 1.2.1 to 1.3.0. 
#2055 * [CHANGE] Config file changed to remove top level `config_store` field in favor of a nested `configdb` field. #2125 * [CHANGE] Removed unnecessary `frontend.cache-split-interval` in favor of `querier.split-queries-by-interval` both to reduce configuration complexity and guarantee alignment of these two configs. Starting from now, `-querier.cache-results` may only be enabled in conjunction with `-querier.split-queries-by-interval` (previously the cache interval default was `24h` so if you want to preserve the same behaviour you should set `-querier.split-queries-by-interval=24h`). #2040 * [CHANGE] Removed remaining support for using denormalised tokens in the ring. If you're still running ingesters with denormalised tokens (Cortex 0.4 or earlier, with `-ingester.normalise-tokens=false`), such ingesters will now be completely invisible to distributors and need to be either switched to Cortex 0.6.0 or later, or be configured to use normalised tokens. #2034