+
+`
+
+ doc, err := goquery.NewDocumentFromReader(strings.NewReader(data))
+ if err != nil {
+ log.Fatal(err)
+ }
+ header := doc.Find("h1").Text()
+ fmt.Println(header)
+
+ // Output: Header
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/expand.go b/vendor/github.com/PuerkitoBio/goquery/expand.go
new file mode 100644
index 0000000..7caade5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/expand.go
@@ -0,0 +1,70 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Add adds the selector string's matching nodes to those in the current
+// selection and returns a new Selection object.
+// The selector string is run in the context of the document of the current
+// Selection object.
+func (s *Selection) Add(selector string) *Selection {
+ return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, compileMatcher(selector))...)
+}
+
+// AddMatcher adds the matcher's matching nodes to those in the current
+// selection and returns a new Selection object.
+// The matcher is run in the context of the document of the current
+// Selection object.
+func (s *Selection) AddMatcher(m Matcher) *Selection {
+ return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, m)...)
+}
+
+// AddSelection adds the specified Selection object's nodes to those in the
+// current selection and returns a new Selection object.
+func (s *Selection) AddSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.AddNodes()
+ }
+ return s.AddNodes(sel.Nodes...)
+}
+
+// Union is an alias for AddSelection.
+func (s *Selection) Union(sel *Selection) *Selection {
+ return s.AddSelection(sel)
+}
+
+// AddNodes adds the specified nodes to those in the
+// current selection and returns a new Selection object.
+func (s *Selection) AddNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, appendWithoutDuplicates(s.Nodes, nodes, nil))
+}
+
+// AndSelf adds the previous set of elements on the stack to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the previous one.
+// Deprecated: This function has been deprecated and is now an alias for AddBack().
+func (s *Selection) AndSelf() *Selection {
+ return s.AddBack()
+}
+
+// AddBack adds the previous set of elements on the stack to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the previous one.
+func (s *Selection) AddBack() *Selection {
+ return s.AddSelection(s.prevSel)
+}
+
+// AddBackFiltered reduces the previous set of elements on the stack to those that
+// match the selector string, and adds them to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the filtered previous one.
+func (s *Selection) AddBackFiltered(selector string) *Selection {
+ return s.AddSelection(s.prevSel.Filter(selector))
+}
+
+// AddBackMatcher reduces the previous set of elements on the stack to those that match
+// the matcher, and adds them to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the filtered previous one.
+func (s *Selection) AddBackMatcher(m Matcher) *Selection {
+ return s.AddSelection(s.prevSel.FilterMatcher(m))
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/expand_test.go b/vendor/github.com/PuerkitoBio/goquery/expand_test.go
new file mode 100644
index 0000000..c034dc6
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/expand_test.go
@@ -0,0 +1,118 @@
+package goquery
+
+import (
+ "testing"
+)
+
+func TestAdd(t *testing.T) {
+ sel := Doc().Find("div.row-fluid").Add("a")
+ assertLength(t, sel.Nodes, 19)
+}
+
+func TestAddInvalid(t *testing.T) {
+ sel1 := Doc().Find("div.row-fluid")
+ sel2 := sel1.Add("")
+ assertLength(t, sel1.Nodes, 9)
+ assertLength(t, sel2.Nodes, 9)
+ if sel1 == sel2 {
+ t.Errorf("selections should not be the same")
+ }
+}
+
+func TestAddRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Add("a").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestAddSelection(t *testing.T) {
+ sel := Doc().Find("div.row-fluid")
+ sel2 := Doc().Find("a")
+ sel = sel.AddSelection(sel2)
+ assertLength(t, sel.Nodes, 19)
+}
+
+func TestAddSelectionNil(t *testing.T) {
+ sel := Doc().Find("div.row-fluid")
+ assertLength(t, sel.Nodes, 9)
+
+ sel = sel.AddSelection(nil)
+ assertLength(t, sel.Nodes, 9)
+}
+
+func TestAddSelectionRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Find("a")
+ sel2 = sel.AddSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestAddNodes(t *testing.T) {
+ sel := Doc().Find("div.pvk-gutter")
+ sel2 := Doc().Find(".pvk-content")
+ sel = sel.AddNodes(sel2.Nodes...)
+ assertLength(t, sel.Nodes, 9)
+}
+
+func TestAddNodesNone(t *testing.T) {
+ sel := Doc().Find("div.pvk-gutter").AddNodes()
+ assertLength(t, sel.Nodes, 6)
+}
+
+func TestAddNodesRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Find("a")
+ sel2 = sel.AddNodes(sel2.Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestAddNodesBig(t *testing.T) {
+ doc := DocW()
+ sel := doc.Find("li")
+ assertLength(t, sel.Nodes, 373)
+ sel2 := doc.Find("xyz")
+ assertLength(t, sel2.Nodes, 0)
+
+ nodes := sel.Nodes
+ sel2 = sel2.AddNodes(nodes...)
+ assertLength(t, sel2.Nodes, 373)
+ nodes2 := append(nodes, nodes...)
+ sel2 = sel2.End().AddNodes(nodes2...)
+ assertLength(t, sel2.Nodes, 373)
+ nodes3 := append(nodes2, nodes...)
+ sel2 = sel2.End().AddNodes(nodes3...)
+ assertLength(t, sel2.Nodes, 373)
+}
+
+func TestAndSelf(t *testing.T) {
+ sel := Doc().Find(".span12").Last().AndSelf()
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestAndSelfRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Find("a").AndSelf().End().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestAddBack(t *testing.T) {
+ sel := Doc().Find(".span12").Last().AddBack()
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestAddBackRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Find("a").AddBack().End().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestAddBackFiltered(t *testing.T) {
+ sel := Doc().Find(".span12, .footer").Find("h1").AddBackFiltered(".footer")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestAddBackFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".span12, .footer")
+ sel2 := sel.Find("h1").AddBackFiltered(".footer").End().End()
+ assertEqual(t, sel, sel2)
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/filter.go b/vendor/github.com/PuerkitoBio/goquery/filter.go
new file mode 100644
index 0000000..9138ffb
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/filter.go
@@ -0,0 +1,163 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Filter reduces the set of matched elements to those that match the selector string.
+// It returns a new Selection object for this subset of matching elements.
+func (s *Selection) Filter(selector string) *Selection {
+ return s.FilterMatcher(compileMatcher(selector))
+}
+
+// FilterMatcher reduces the set of matched elements to those that match
+// the given matcher. It returns a new Selection object for this subset
+// of matching elements.
+func (s *Selection) FilterMatcher(m Matcher) *Selection {
+ return pushStack(s, winnow(s, m, true))
+}
+
+// Not removes elements from the Selection that match the selector string.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) Not(selector string) *Selection {
+ return s.NotMatcher(compileMatcher(selector))
+}
+
+// NotMatcher removes elements from the Selection that match the given matcher.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotMatcher(m Matcher) *Selection {
+ return pushStack(s, winnow(s, m, false))
+}
+
+// FilterFunction reduces the set of matched elements to those that pass the function's test.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterFunction(f func(int, *Selection) bool) *Selection {
+ return pushStack(s, winnowFunction(s, f, true))
+}
+
+// NotFunction removes elements from the Selection that pass the function's test.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotFunction(f func(int, *Selection) bool) *Selection {
+ return pushStack(s, winnowFunction(s, f, false))
+}
+
+// FilterNodes reduces the set of matched elements to those that match the specified nodes.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, winnowNodes(s, nodes, true))
+}
+
+// NotNodes removes elements from the Selection that match the specified nodes.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, winnowNodes(s, nodes, false))
+}
+
+// FilterSelection reduces the set of matched elements to those that match a
+// node in the specified Selection object.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, winnowNodes(s, nil, true))
+ }
+ return pushStack(s, winnowNodes(s, sel.Nodes, true))
+}
+
+// NotSelection removes elements from the Selection that match a node in the specified
+// Selection object. It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, winnowNodes(s, nil, false))
+ }
+ return pushStack(s, winnowNodes(s, sel.Nodes, false))
+}
+
+// Intersection is an alias for FilterSelection.
+func (s *Selection) Intersection(sel *Selection) *Selection {
+ return s.FilterSelection(sel)
+}
+
+// Has reduces the set of matched elements to those that have a descendant
+// that matches the selector.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) Has(selector string) *Selection {
+ return s.HasSelection(s.document.Find(selector))
+}
+
+// HasMatcher reduces the set of matched elements to those that have a descendant
+// that matches the matcher.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasMatcher(m Matcher) *Selection {
+ return s.HasSelection(s.document.FindMatcher(m))
+}
+
+// HasNodes reduces the set of matched elements to those that have a
+// descendant that matches one of the nodes.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasNodes(nodes ...*html.Node) *Selection {
+ return s.FilterFunction(func(_ int, sel *Selection) bool {
+ // Add all nodes that contain one of the specified nodes
+ for _, n := range nodes {
+ if sel.Contains(n) {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// HasSelection reduces the set of matched elements to those that have a
+// descendant that matches one of the nodes of the specified Selection object.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.HasNodes()
+ }
+ return s.HasNodes(sel.Nodes...)
+}
+
+// End ends the most recent filtering operation in the current chain and
+// returns the set of matched elements to its previous state.
+func (s *Selection) End() *Selection {
+ if s.prevSel != nil {
+ return s.prevSel
+ }
+ return newEmptySelection(s.document)
+}
+
+// Filter based on the matcher, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnow(sel *Selection, m Matcher, keep bool) []*html.Node {
+ // Optimize if keep is requested
+ if keep {
+ return m.Filter(sel.Nodes)
+ }
+ // Use grep
+ return grep(sel, func(i int, s *Selection) bool {
+ return !m.Match(s.Get(0))
+ })
+}
+
+// Filter based on an array of nodes, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnowNodes(sel *Selection, nodes []*html.Node, keep bool) []*html.Node {
+ if len(nodes)+len(sel.Nodes) < minNodesForSet {
+ return grep(sel, func(i int, s *Selection) bool {
+ return isInSlice(nodes, s.Get(0)) == keep
+ })
+ }
+
+ set := make(map[*html.Node]bool)
+ for _, n := range nodes {
+ set[n] = true
+ }
+ return grep(sel, func(i int, s *Selection) bool {
+ return set[s.Get(0)] == keep
+ })
+}
+
+// Filter based on a function test, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnowFunction(sel *Selection, f func(int, *Selection) bool, keep bool) []*html.Node {
+ return grep(sel, func(i int, s *Selection) bool {
+ return f(i, s) == keep
+ })
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/filter_test.go b/vendor/github.com/PuerkitoBio/goquery/filter_test.go
new file mode 100644
index 0000000..f663c08
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/filter_test.go
@@ -0,0 +1,206 @@
+package goquery
+
+import (
+ "testing"
+)
+
+func TestFilter(t *testing.T) {
+ sel := Doc().Find(".span12").Filter(".alert")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestFilterNone(t *testing.T) {
+ sel := Doc().Find(".span12").Filter(".zzalert")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestFilterInvalid(t *testing.T) {
+ sel := Doc().Find(".span12").Filter("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestFilterRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Filter(".alert").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestFilterFunction(t *testing.T) {
+ sel := Doc().Find(".pvk-content").FilterFunction(func(i int, s *Selection) bool {
+ return i > 0
+ })
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestFilterFunctionRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.FilterFunction(func(i int, s *Selection) bool {
+ return i > 0
+ }).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestFilterNode(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.FilterNodes(sel.Nodes[2])
+ assertLength(t, sel2.Nodes, 1)
+}
+
+func TestFilterNodeRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.FilterNodes(sel.Nodes[2]).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestFilterSelection(t *testing.T) {
+ sel := Doc().Find(".link")
+ sel2 := Doc().Find("a[ng-click]")
+ sel3 := sel.FilterSelection(sel2)
+ assertLength(t, sel3.Nodes, 1)
+}
+
+func TestFilterSelectionRollback(t *testing.T) {
+ sel := Doc().Find(".link")
+ sel2 := Doc().Find("a[ng-click]")
+ sel2 = sel.FilterSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestFilterSelectionNil(t *testing.T) {
+ var sel2 *Selection
+
+ sel := Doc().Find(".link")
+ sel3 := sel.FilterSelection(sel2)
+ assertLength(t, sel3.Nodes, 0)
+}
+
+func TestNot(t *testing.T) {
+ sel := Doc().Find(".span12").Not(".alert")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestNotInvalid(t *testing.T) {
+ sel := Doc().Find(".span12").Not("")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestNotRollback(t *testing.T) {
+ sel := Doc().Find(".span12")
+ sel2 := sel.Not(".alert").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNotNone(t *testing.T) {
+ sel := Doc().Find(".span12").Not(".zzalert")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestNotFunction(t *testing.T) {
+ sel := Doc().Find(".pvk-content").NotFunction(func(i int, s *Selection) bool {
+ return i > 0
+ })
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestNotFunctionRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.NotFunction(func(i int, s *Selection) bool {
+ return i > 0
+ }).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNotNode(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.NotNodes(sel.Nodes[2])
+ assertLength(t, sel2.Nodes, 2)
+}
+
+func TestNotNodeRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.NotNodes(sel.Nodes[2]).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNotSelection(t *testing.T) {
+ sel := Doc().Find(".link")
+ sel2 := Doc().Find("a[ng-click]")
+ sel3 := sel.NotSelection(sel2)
+ assertLength(t, sel3.Nodes, 6)
+}
+
+func TestNotSelectionRollback(t *testing.T) {
+ sel := Doc().Find(".link")
+ sel2 := Doc().Find("a[ng-click]")
+ sel2 = sel.NotSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestIntersection(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter")
+ sel2 := Doc().Find("div").Intersection(sel)
+ assertLength(t, sel2.Nodes, 6)
+}
+
+func TestIntersectionRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter")
+ sel2 := Doc().Find("div")
+ sel2 = sel.Intersection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestHas(t *testing.T) {
+ sel := Doc().Find(".container-fluid").Has(".center-content")
+ assertLength(t, sel.Nodes, 2)
+ // Has() returns the high-level .container-fluid div, and the one that is the immediate parent of center-content
+}
+
+func TestHasInvalid(t *testing.T) {
+ sel := Doc().Find(".container-fluid").Has("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestHasRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.Has(".center-content").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestHasNodes(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".center-content")
+ sel = sel.HasNodes(sel2.Nodes...)
+ assertLength(t, sel.Nodes, 2)
+ // Has() returns the high-level .container-fluid div, and the one that is the immediate parent of center-content
+}
+
+func TestHasNodesRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".center-content")
+ sel2 = sel.HasNodes(sel2.Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestHasSelection(t *testing.T) {
+ sel := Doc().Find("p")
+ sel2 := Doc().Find("small")
+ sel = sel.HasSelection(sel2)
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestHasSelectionRollback(t *testing.T) {
+ sel := Doc().Find("p")
+ sel2 := Doc().Find("small")
+ sel2 = sel.HasSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestEnd(t *testing.T) {
+ sel := Doc().Find("p").Has("small").End()
+ assertLength(t, sel.Nodes, 4)
+}
+
+func TestEndToTop(t *testing.T) {
+ sel := Doc().Find("p").Has("small").End().End().End()
+ assertLength(t, sel.Nodes, 0)
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.mod b/vendor/github.com/PuerkitoBio/goquery/go.mod
new file mode 100644
index 0000000..2fa1332
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.mod
@@ -0,0 +1,6 @@
+module github.com/PuerkitoBio/goquery
+
+require (
+ github.com/andybalholm/cascadia v1.0.0
+ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
+)
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.sum b/vendor/github.com/PuerkitoBio/goquery/go.sum
new file mode 100644
index 0000000..11c5757
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.sum
@@ -0,0 +1,5 @@
+github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
diff --git a/vendor/github.com/PuerkitoBio/goquery/iteration.go b/vendor/github.com/PuerkitoBio/goquery/iteration.go
new file mode 100644
index 0000000..e246f2e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/iteration.go
@@ -0,0 +1,39 @@
+package goquery
+
+// Each iterates over a Selection object, executing a function for each
+// matched element. It returns the current Selection object. The function
+// f is called for each element in the selection with the index of the
+// element in that selection starting at 0, and a *Selection that contains
+// only that element.
+func (s *Selection) Each(f func(int, *Selection)) *Selection {
+ for i, n := range s.Nodes {
+ f(i, newSingleSelection(n, s.document))
+ }
+ return s
+}
+
+// EachWithBreak iterates over a Selection object, executing a function for each
+// matched element. It is identical to Each except that it is possible to break
+// out of the loop by returning false in the callback function. It returns the
+// current Selection object.
+func (s *Selection) EachWithBreak(f func(int, *Selection) bool) *Selection {
+ for i, n := range s.Nodes {
+ if !f(i, newSingleSelection(n, s.document)) {
+ return s
+ }
+ }
+ return s
+}
+
+// Map passes each element in the current matched set through a function,
+// producing a slice of string holding the returned values. The function
+// f is called for each element in the selection with the index of the
+// element in that selection starting at 0, and a *Selection that contains
+// only that element.
+func (s *Selection) Map(f func(int, *Selection) string) (result []string) {
+ for i, n := range s.Nodes {
+ result = append(result, f(i, newSingleSelection(n, s.document)))
+ }
+
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/iteration_test.go b/vendor/github.com/PuerkitoBio/goquery/iteration_test.go
new file mode 100644
index 0000000..9b6aafb
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/iteration_test.go
@@ -0,0 +1,88 @@
+package goquery
+
+import (
+ "testing"
+
+ "golang.org/x/net/html"
+)
+
+func TestEach(t *testing.T) {
+ var cnt int
+
+ sel := Doc().Find(".hero-unit .row-fluid").Each(func(i int, n *Selection) {
+ cnt++
+ t.Logf("At index %v, node %v", i, n.Nodes[0].Data)
+ }).Find("a")
+
+ if cnt != 4 {
+ t.Errorf("Expected Each() to call function 4 times, got %v times.", cnt)
+ }
+ assertLength(t, sel.Nodes, 6)
+}
+
+func TestEachWithBreak(t *testing.T) {
+ var cnt int
+
+ sel := Doc().Find(".hero-unit .row-fluid").EachWithBreak(func(i int, n *Selection) bool {
+ cnt++
+ t.Logf("At index %v, node %v", i, n.Nodes[0].Data)
+ return false
+ }).Find("a")
+
+ if cnt != 1 {
+ t.Errorf("Expected Each() to call function 1 time, got %v times.", cnt)
+ }
+ assertLength(t, sel.Nodes, 6)
+}
+
+func TestEachEmptySelection(t *testing.T) {
+ var cnt int
+
+ sel := Doc().Find("zzzz")
+ sel.Each(func(i int, n *Selection) {
+ cnt++
+ })
+ if cnt > 0 {
+ t.Error("Expected Each() to not be called on empty Selection.")
+ }
+ sel2 := sel.Find("div")
+ assertLength(t, sel2.Nodes, 0)
+}
+
+func TestMap(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ vals := sel.Map(func(i int, s *Selection) string {
+ n := s.Get(0)
+ if n.Type == html.ElementNode {
+ return n.Data
+ }
+ return ""
+ })
+ for _, v := range vals {
+ if v != "div" {
+ t.Error("Expected Map array result to be all 'div's.")
+ }
+ }
+ if len(vals) != 3 {
+ t.Errorf("Expected Map array result to have a length of 3, found %v.", len(vals))
+ }
+}
+
+func TestForRange(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ initLen := sel.Length()
+ for i := range sel.Nodes {
+ single := sel.Eq(i)
+ //h, err := single.Html()
+ //if err != nil {
+ // t.Fatal(err)
+ //}
+ //fmt.Println(i, h)
+ if single.Length() != 1 {
+ t.Errorf("%d: expected length of 1, got %d", i, single.Length())
+ }
+ }
+ if sel.Length() != initLen {
+ t.Errorf("expected initial selection to still have length %d, got %d", initLen, sel.Length())
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/manipulation.go b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
new file mode 100644
index 0000000..34eb757
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
@@ -0,0 +1,574 @@
+package goquery
+
+import (
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// After applies the selector from the root document and inserts the matched elements
+// after the elements in the set of matched elements.
+//
+// If one of the matched elements in the selection is not currently in the
+// document, it's impossible to insert nodes after it, so it will be ignored.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) After(selector string) *Selection {
+ return s.AfterMatcher(compileMatcher(selector))
+}
+
+// AfterMatcher applies the matcher from the root document and inserts the matched elements
+// after the elements in the set of matched elements.
+//
+// If one of the matched elements in the selection is not currently in the
+// document, it's impossible to insert nodes after it, so it will be ignored.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterMatcher(m Matcher) *Selection {
+ return s.AfterNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// AfterSelection inserts the elements in the selection after each element in the set of matched
+// elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterSelection(sel *Selection) *Selection {
+ return s.AfterNodes(sel.Nodes...)
+}
+
+// AfterHtml parses the html and inserts it after the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterHtml(html string) *Selection {
+ return s.AfterNodes(parseHtml(html)...)
+}
+
+// AfterNodes inserts the nodes after each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
+ if sn.Parent != nil {
+ sn.Parent.InsertBefore(n, sn.NextSibling)
+ }
+ })
+}
+
+// Append appends the elements specified by the selector to the end of each element
+// in the set of matched elements, following those rules:
+//
+// 1) The selector is applied to the root document.
+//
+// 2) Elements that are part of the document will be moved to the new location.
+//
+// 3) If there are multiple locations to append to, cloned nodes will be
+// appended to all target locations except the last one, which will be moved
+// as noted in (2).
+func (s *Selection) Append(selector string) *Selection {
+ return s.AppendMatcher(compileMatcher(selector))
+}
+
+// AppendMatcher appends the elements specified by the matcher to the end of each element
+// in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendMatcher(m Matcher) *Selection {
+ return s.AppendNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// AppendSelection appends the elements in the selection to the end of each element
+// in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendSelection(sel *Selection) *Selection {
+ return s.AppendNodes(sel.Nodes...)
+}
+
+// AppendHtml parses the html and appends it to the set of matched elements.
+func (s *Selection) AppendHtml(html string) *Selection {
+ return s.AppendNodes(parseHtml(html)...)
+}
+
+// AppendNodes appends the specified nodes to each node in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
+ sn.AppendChild(n)
+ })
+}
+
+// Before inserts the matched elements before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) Before(selector string) *Selection {
+ return s.BeforeMatcher(compileMatcher(selector))
+}
+
+// BeforeMatcher inserts the matched elements before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeMatcher(m Matcher) *Selection {
+ return s.BeforeNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// BeforeSelection inserts the elements in the selection before each element in the set of matched
+// elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeSelection(sel *Selection) *Selection {
+ return s.BeforeNodes(sel.Nodes...)
+}
+
+// BeforeHtml parses the html and inserts it before the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeHtml(html string) *Selection {
+ return s.BeforeNodes(parseHtml(html)...)
+}
+
+// BeforeNodes inserts the nodes before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
+ if sn.Parent != nil {
+ sn.Parent.InsertBefore(n, sn)
+ }
+ })
+}
+
+// Clone creates a deep copy of the set of matched nodes. The new nodes will not be
+// attached to the document.
+func (s *Selection) Clone() *Selection {
+ ns := newEmptySelection(s.document)
+ ns.Nodes = cloneNodes(s.Nodes)
+ return ns
+}
+
+// Empty removes all children nodes from the set of matched elements.
+// It returns the children nodes in a new Selection.
+func (s *Selection) Empty() *Selection {
+ var nodes []*html.Node
+
+ for _, n := range s.Nodes {
+ for c := n.FirstChild; c != nil; c = n.FirstChild {
+ n.RemoveChild(c)
+ nodes = append(nodes, c)
+ }
+ }
+
+ return pushStack(s, nodes)
+}
+
+// Prepend prepends the elements specified by the selector to each element in
+// the set of matched elements, following the same rules as Append.
+func (s *Selection) Prepend(selector string) *Selection {
+ return s.PrependMatcher(compileMatcher(selector))
+}
+
+// PrependMatcher prepends the elements specified by the matcher to each
+// element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependMatcher(m Matcher) *Selection {
+ return s.PrependNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// PrependSelection prepends the elements in the selection to each element in
+// the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependSelection(sel *Selection) *Selection {
+ return s.PrependNodes(sel.Nodes...)
+}
+
+// PrependHtml parses the html and prepends it to the set of matched elements.
+func (s *Selection) PrependHtml(html string) *Selection {
+ return s.PrependNodes(parseHtml(html)...)
+}
+
+// PrependNodes prepends the specified nodes to each node in the set of
+// matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
+ // sn.FirstChild may be nil, in which case this functions like
+ // sn.AppendChild()
+ sn.InsertBefore(n, sn.FirstChild)
+ })
+}
+
+// Remove removes the set of matched elements from the document.
+// It returns the same selection, now consisting of nodes not in the document.
+func (s *Selection) Remove() *Selection {
+ for _, n := range s.Nodes {
+ if n.Parent != nil {
+ n.Parent.RemoveChild(n)
+ }
+ }
+
+ return s
+}
+
+// RemoveFiltered removes the set of matched elements by selector.
+// It returns the Selection of removed nodes.
+func (s *Selection) RemoveFiltered(selector string) *Selection {
+ return s.RemoveMatcher(compileMatcher(selector))
+}
+
+// RemoveMatcher removes the set of matched elements.
+// It returns the Selection of removed nodes.
+func (s *Selection) RemoveMatcher(m Matcher) *Selection {
+ return s.FilterMatcher(m).Remove()
+}
+
+// ReplaceWith replaces each element in the set of matched elements with the
+// nodes matched by the given selector.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWith(selector string) *Selection {
+ return s.ReplaceWithMatcher(compileMatcher(selector))
+}
+
+// ReplaceWithMatcher replaces each element in the set of matched elements with
+// the nodes matched by the given Matcher.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithMatcher(m Matcher) *Selection {
+ return s.ReplaceWithNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// ReplaceWithSelection replaces each element in the set of matched elements with
+// the nodes from the given Selection.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
+ return s.ReplaceWithNodes(sel.Nodes...)
+}
+
+// ReplaceWithHtml replaces each element in the set of matched elements with
+// the parsed HTML.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithHtml(html string) *Selection {
+ return s.ReplaceWithNodes(parseHtml(html)...)
+}
+
+// ReplaceWithNodes replaces each element in the set of matched elements with
+// the given nodes.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
+ s.AfterNodes(ns...)
+ return s.Remove()
+}
+
+// SetHtml sets the html content of each element in the selection to
+// specified html string.
+func (s *Selection) SetHtml(html string) *Selection {
+ return setHtmlNodes(s, parseHtml(html)...)
+}
+
+// SetText sets the content of each element in the selection to specified content.
+// The provided text string is escaped.
+func (s *Selection) SetText(text string) *Selection {
+ return s.SetHtml(html.EscapeString(text))
+}
+
+// Unwrap removes the parents of the set of matched elements, leaving the matched
+// elements (and their siblings, if any) in their place.
+// It returns the original selection.
+func (s *Selection) Unwrap() *Selection {
+ s.Parent().Each(func(i int, ss *Selection) {
+ // For some reason, jquery allows unwrap to remove the element, so
+ // allowing it here too. Same for . Why it allows those elements to
+ // be unwrapped while not allowing body is a mystery to me.
+ if ss.Nodes[0].Data != "body" {
+ ss.ReplaceWithSelection(ss.Contents())
+ }
+ })
+
+ return s
+}
+
+// Wrap wraps each element in the set of matched elements inside the first
+// element matched by the given selector. The matched child is cloned before
+// being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) Wrap(selector string) *Selection {
+ return s.WrapMatcher(compileMatcher(selector))
+}
+
+// WrapMatcher wraps each element in the set of matched elements inside the
+// first element matched by the given matcher. The matched child is cloned
+// before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapMatcher(m Matcher) *Selection {
+ return s.wrapNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapSelection wraps each element in the set of matched elements inside the
+// first element in the given Selection. The element is cloned before being
+// inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapSelection(sel *Selection) *Selection {
+ return s.wrapNodes(sel.Nodes...)
+}
+
+// WrapHtml wraps each element in the set of matched elements inside the inner-
+// most child of the given HTML.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapHtml(html string) *Selection {
+ return s.wrapNodes(parseHtml(html)...)
+}
+
+// WrapNode wraps each element in the set of matched elements inside the inner-
+// most child of the given node. The given node is copied before being inserted
+// into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapNode(n *html.Node) *Selection {
+ return s.wrapNodes(n)
+}
+
+func (s *Selection) wrapNodes(ns ...*html.Node) *Selection {
+ s.Each(func(i int, ss *Selection) {
+ ss.wrapAllNodes(ns...)
+ })
+
+ return s
+}
+
+// WrapAll wraps a single HTML structure, matched by the given selector, around
+// all elements in the set of matched elements. The matched child is cloned
+// before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAll(selector string) *Selection {
+ return s.WrapAllMatcher(compileMatcher(selector))
+}
+
+// WrapAllMatcher wraps a single HTML structure, matched by the given Matcher,
+// around all elements in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllMatcher(m Matcher) *Selection {
+ return s.wrapAllNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapAllSelection wraps a single HTML structure, the first node of the given
+// Selection, around all elements in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
+ return s.wrapAllNodes(sel.Nodes...)
+}
+
+// WrapAllHtml wraps the given HTML structure around all elements in the set of
+// matched elements. The matched child is cloned before being inserted into the
+// document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllHtml(html string) *Selection {
+ return s.wrapAllNodes(parseHtml(html)...)
+}
+
+func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
+ if len(ns) > 0 {
+ return s.WrapAllNode(ns[0])
+ }
+ return s
+}
+
// WrapAllNode wraps the given node around the first element in the Selection,
// making all other nodes in the Selection children of the given node. The node
// is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllNode(n *html.Node) *Selection {
	if s.Size() == 0 {
		return s
	}

	// Work on a deep clone so the caller's node is left untouched.
	wrap := cloneNode(n)

	// Put the wrapper where the first matched element currently sits, then
	// detach that element so it can be re-appended inside the wrapper below.
	first := s.Nodes[0]
	if first.Parent != nil {
		first.Parent.InsertBefore(wrap, first)
		first.Parent.RemoveChild(first)
	}

	// Descend to the inner-most element child of the wrapper structure; that
	// is where the matched elements are re-attached.
	for c := getFirstChildEl(wrap); c != nil; c = getFirstChildEl(wrap) {
		wrap = c
	}

	// AppendSelection moves each matched node (detaching it from its current
	// parent) into the inner-most wrapper element.
	newSingleSelection(wrap, s.document).AppendSelection(s)

	return s
}
+
+// WrapInner wraps an HTML structure, matched by the given selector, around the
+// content of element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInner(selector string) *Selection {
+ return s.WrapInnerMatcher(compileMatcher(selector))
+}
+
+// WrapInnerMatcher wraps an HTML structure, matched by the given selector,
+// around the content of element in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerMatcher(m Matcher) *Selection {
+ return s.wrapInnerNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapInnerSelection wraps an HTML structure, matched by the given selector,
+// around the content of element in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
+ return s.wrapInnerNodes(sel.Nodes...)
+}
+
+// WrapInnerHtml wraps an HTML structure, matched by the given selector, around
+// the content of element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerHtml(html string) *Selection {
+ return s.wrapInnerNodes(parseHtml(html)...)
+}
+
+// WrapInnerNode wraps an HTML structure, matched by the given selector, around
+// the content of element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerNode(n *html.Node) *Selection {
+ return s.wrapInnerNodes(n)
+}
+
+func (s *Selection) wrapInnerNodes(ns ...*html.Node) *Selection {
+ if len(ns) == 0 {
+ return s
+ }
+
+ s.Each(func(i int, s *Selection) {
+ contents := s.Contents()
+
+ if contents.Size() > 0 {
+ contents.wrapAllNodes(ns...)
+ } else {
+ s.AppendNodes(cloneNode(ns[0]))
+ }
+ })
+
+ return s
+}
+
+func parseHtml(h string) []*html.Node {
+ // Errors are only returned when the io.Reader returns any error besides
+ // EOF, but strings.Reader never will
+ nodes, err := html.ParseFragment(strings.NewReader(h), &html.Node{Type: html.ElementNode})
+ if err != nil {
+ panic("goquery: failed to parse HTML: " + err.Error())
+ }
+ return nodes
+}
+
+func setHtmlNodes(s *Selection, ns ...*html.Node) *Selection {
+ for _, n := range s.Nodes {
+ for c := n.FirstChild; c != nil; c = n.FirstChild {
+ n.RemoveChild(c)
+ }
+ for _, c := range ns {
+ n.AppendChild(cloneNode(c))
+ }
+ }
+ return s
+}
+
+// Get the first child that is an ElementNode
+func getFirstChildEl(n *html.Node) *html.Node {
+ c := n.FirstChild
+ for c != nil && c.Type != html.ElementNode {
+ c = c.NextSibling
+ }
+ return c
+}
+
+// Deep copy a slice of nodes.
+func cloneNodes(ns []*html.Node) []*html.Node {
+ cns := make([]*html.Node, 0, len(ns))
+
+ for _, n := range ns {
+ cns = append(cns, cloneNode(n))
+ }
+
+ return cns
+}
+
+// Deep copy a node. The new node has clones of all the original node's
+// children but none of its parents or siblings.
+func cloneNode(n *html.Node) *html.Node {
+ nn := &html.Node{
+ Type: n.Type,
+ DataAtom: n.DataAtom,
+ Data: n.Data,
+ Attr: make([]html.Attribute, len(n.Attr)),
+ }
+
+ copy(nn.Attr, n.Attr)
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ nn.AppendChild(cloneNode(c))
+ }
+
+ return nn
+}
+
// manipulateNodes applies f to every (target, node) pair formed from the
// selection's nodes and ns. Every target except the last receives a deep
// clone of each node, so each target gets its own copy; the last target
// receives the original nodes, each detached from its current parent first.
//
// When reverse is true, ns is reversed IN PLACE before processing — note
// that this mutates the caller's slice.
func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
	f func(sn *html.Node, n *html.Node)) *Selection {

	lasti := s.Size() - 1

	// net.Html doesn't provide document fragments for insertion, so to get
	// things in the correct order with After() and Prepend(), the callback
	// needs to be called on the reverse of the nodes.
	if reverse {
		for i, j := 0, len(ns)-1; i < j; i, j = i+1, j-1 {
			ns[i], ns[j] = ns[j], ns[i]
		}
	}

	for i, sn := range s.Nodes {
		for _, n := range ns {
			if i != lasti {
				// Not the last target: insert a clone so the original node
				// stays available for the remaining targets.
				f(sn, cloneNode(n))
			} else {
				// Last target: move the original node itself, detaching it
				// from its current parent first.
				if n.Parent != nil {
					n.Parent.RemoveChild(n)
				}
				f(sn, n)
			}
		}
	}

	return s
}
diff --git a/vendor/github.com/PuerkitoBio/goquery/manipulation_test.go b/vendor/github.com/PuerkitoBio/goquery/manipulation_test.go
new file mode 100644
index 0000000..c5f5022
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/manipulation_test.go
@@ -0,0 +1,513 @@
+package goquery
+
+import (
+ "testing"
+)
+
+const (
+ wrapHtml = "
"
+ q.SetText(repl)
+
+ assertLength(t, doc.Find("#replace").Nodes, 0)
+ assertLength(t, doc.Find("#main, #foot").Nodes, 2)
+
+ if q.Text() != (repl + repl) {
+ t.Errorf("Expected text to be %v, found %v", (repl + repl), q.Text())
+ }
+
+ h, err := q.Html()
+ if err != nil {
+ t.Errorf("Error: %v", err)
+ }
+ esc := "<div id="replace">test</div>"
+ if h != esc {
+ t.Errorf("Expected html to be %v, found %v", esc, h)
+ }
+
+ printSel(t, doc.Selection)
+}
+
// TestReplaceWithSelection verifies that replacing #nf6 with #nf5 removes
// #nf6 and relocates #nf5.
func TestReplaceWithSelection(t *testing.T) {
	doc := Doc2Clone()
	sel := doc.Find("#nf6").ReplaceWithSelection(doc.Find("#nf5"))

	assertSelectionIs(t, sel, "#nf6")
	assertLength(t, doc.Find("#nf6").Nodes, 0)
	assertLength(t, doc.Find("#nf5").Nodes, 1)

	printSel(t, doc.Selection)
}

// TestUnwrap verifies that Unwrap removes the parent of the matched elements
// and hoists them (and their siblings) up one level.
func TestUnwrap(t *testing.T) {
	doc := Doc2Clone()

	doc.Find("#nf5").Unwrap()
	assertLength(t, doc.Find("#foot").Nodes, 0)
	assertLength(t, doc.Find("body > #nf1").Nodes, 1)
	assertLength(t, doc.Find("body > #nf5").Nodes, 1)

	printSel(t, doc.Selection)

	doc = Doc2Clone()

	// Unwrapping a multi-element selection removes each matched element's
	// parent (#foot and #main).
	doc.Find("#nf5, #n1").Unwrap()
	assertLength(t, doc.Find("#foot").Nodes, 0)
	assertLength(t, doc.Find("#main").Nodes, 0)
	assertLength(t, doc.Find("body > #n1").Nodes, 1)
	assertLength(t, doc.Find("body > #nf5").Nodes, 1)

	printSel(t, doc.Selection)
}

// TestUnwrapBody verifies that Unwrap refuses to remove the body element.
func TestUnwrapBody(t *testing.T) {
	doc := Doc2Clone()

	doc.Find("#main").Unwrap()
	assertLength(t, doc.Find("body").Nodes, 1)
	assertLength(t, doc.Find("body > #main").Nodes, 1)

	printSel(t, doc.Selection)
}

// TestUnwrapHead verifies that head, unlike body, can be unwrapped.
func TestUnwrapHead(t *testing.T) {
	doc := Doc2Clone()

	doc.Find("title").Unwrap()
	assertLength(t, doc.Find("head").Nodes, 0)
	assertLength(t, doc.Find("head > title").Nodes, 0)
	assertLength(t, doc.Find("title").Nodes, 1)

	printSel(t, doc.Selection)
}

// TestUnwrapHtml verifies that the html element itself can be unwrapped.
func TestUnwrapHtml(t *testing.T) {
	doc := Doc2Clone()

	doc.Find("head").Unwrap()
	assertLength(t, doc.Find("html").Nodes, 0)
	assertLength(t, doc.Find("html head").Nodes, 0)
	assertLength(t, doc.Find("head").Nodes, 1)

	printSel(t, doc.Selection)
}
+
// TestWrap verifies that a matched element is moved inside a clone of the
// wrapper; the original wrapper (#nf2) remains, so two #nf2 exist afterwards.
func TestWrap(t *testing.T) {
	doc := Doc2Clone()
	doc.Find("#nf1").Wrap("#nf2")
	nf1 := doc.Find("#foot #nf2 #nf1")
	assertLength(t, nf1.Nodes, 1)

	nf2 := doc.Find("#nf2")
	assertLength(t, nf2.Nodes, 2)

	printSel(t, doc.Selection)
}

// TestWrapEmpty verifies that wrapping with a selector that matches nothing
// leaves the document unchanged.
func TestWrapEmpty(t *testing.T) {
	doc := Doc2Clone()
	doc.Find("#nf1").Wrap("#doesnt-exist")

	origHtml, _ := Doc2().Html()
	newHtml, _ := doc.Html()

	if origHtml != newHtml {
		t.Error("Expected the two documents to be identical.")
	}

	printSel(t, doc.Selection)
}

// TestWrapHtml verifies wrapping with an HTML string (the wrapHtml fixture).
func TestWrapHtml(t *testing.T) {
	doc := Doc2Clone()
	doc.Find(".odd").WrapHtml(wrapHtml)
	nf2 := doc.Find("#ins #nf2")
	assertLength(t, nf2.Nodes, 1)
	printSel(t, doc.Selection)
}

// TestWrapSelection mirrors TestWrap using a Selection as the wrapper source.
func TestWrapSelection(t *testing.T) {
	doc := Doc2Clone()
	doc.Find("#nf1").WrapSelection(doc.Find("#nf2"))
	nf1 := doc.Find("#foot #nf2 #nf1")
	assertLength(t, nf1.Nodes, 1)

	nf2 := doc.Find("#nf2")
	assertLength(t, nf2.Nodes, 2)

	printSel(t, doc.Selection)
}

// TestWrapAll verifies that all matched elements end up as siblings inside a
// single clone of the wrapper.
func TestWrapAll(t *testing.T) {
	doc := Doc2Clone()
	doc.Find(".odd").WrapAll("#nf1")
	nf1 := doc.Find("#main #nf1")
	assertLength(t, nf1.Nodes, 1)

	sel := nf1.Find("#n2 ~ #n4 ~ #n6 ~ #nf2 ~ #nf4 ~ #nf6")
	assertLength(t, sel.Nodes, 1)

	printSel(t, doc.Selection)
}

// TestWrapAllHtml verifies that WrapAllHtml nests the matched elements inside
// the inner-most element of the parsed wrapper structure.
func TestWrapAllHtml(t *testing.T) {
	doc := Doc2Clone()
	doc.Find(".odd").WrapAllHtml(wrapHtml)
	nf1 := doc.Find("#main div#ins div p em b #n2 ~ #n4 ~ #n6 ~ #nf2 ~ #nf4 ~ #nf6")
	assertLength(t, nf1.Nodes, 1)
	printSel(t, doc.Selection)
}
+
// TestWrapInnerNoContent verifies WrapInner on elements whose contents get
// wrapped by a clone of the .two element.
func TestWrapInnerNoContent(t *testing.T) {
	doc := Doc2Clone()
	doc.Find(".one").WrapInner(".two")

	twos := doc.Find(".two")
	assertLength(t, twos.Nodes, 4)
	assertLength(t, doc.Find(".one .two").Nodes, 2)

	printSel(t, doc.Selection)
}

// TestWrapInnerWithContent runs the same scenario against Doc3, where the
// .one elements have contents to wrap.
func TestWrapInnerWithContent(t *testing.T) {
	doc := Doc3Clone()
	doc.Find(".one").WrapInner(".two")

	twos := doc.Find(".two")
	assertLength(t, twos.Nodes, 4)
	assertLength(t, doc.Find(".one .two").Nodes, 2)

	printSel(t, doc.Selection)
}

// TestWrapInnerNoWrapper verifies that a selector matching nothing leaves the
// document unchanged.
func TestWrapInnerNoWrapper(t *testing.T) {
	doc := Doc2Clone()
	doc.Find(".one").WrapInner(".not-exist")

	twos := doc.Find(".two")
	assertLength(t, twos.Nodes, 2)
	assertLength(t, doc.Find(".one").Nodes, 2)
	assertLength(t, doc.Find(".one .two").Nodes, 0)

	printSel(t, doc.Selection)
}

// TestWrapInnerHtml verifies WrapInnerHtml nests #foot's children inside the
// inner-most element of the parsed wrapper fixture.
func TestWrapInnerHtml(t *testing.T) {
	doc := Doc2Clone()
	doc.Find("#foot").WrapInnerHtml(wrapHtml)

	foot := doc.Find("#foot div#ins div p em b #nf1 ~ #nf2 ~ #nf3")
	assertLength(t, foot.Nodes, 1)

	printSel(t, doc.Selection)
}
diff --git a/vendor/github.com/PuerkitoBio/goquery/property.go b/vendor/github.com/PuerkitoBio/goquery/property.go
new file mode 100644
index 0000000..411126d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/property.go
@@ -0,0 +1,275 @@
+package goquery
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
// rxClassTrim matches the whitespace characters (tab, CR, LF) that are
// normalized to a single space when reading the "class" attribute.
var rxClassTrim = regexp.MustCompile("[\t\r\n]")
+
+// Attr gets the specified attribute's value for the first element in the
+// Selection. To get the value for each element individually, use a looping
+// construct such as Each or Map method.
+func (s *Selection) Attr(attrName string) (val string, exists bool) {
+ if len(s.Nodes) == 0 {
+ return
+ }
+ return getAttributeValue(attrName, s.Nodes[0])
+}
+
+// AttrOr works like Attr but returns default value if attribute is not present.
+func (s *Selection) AttrOr(attrName, defaultValue string) string {
+ if len(s.Nodes) == 0 {
+ return defaultValue
+ }
+
+ val, exists := getAttributeValue(attrName, s.Nodes[0])
+ if !exists {
+ return defaultValue
+ }
+
+ return val
+}
+
+// RemoveAttr removes the named attribute from each element in the set of matched elements.
+func (s *Selection) RemoveAttr(attrName string) *Selection {
+ for _, n := range s.Nodes {
+ removeAttr(n, attrName)
+ }
+
+ return s
+}
+
+// SetAttr sets the given attribute on each element in the set of matched elements.
+func (s *Selection) SetAttr(attrName, val string) *Selection {
+ for _, n := range s.Nodes {
+ attr := getAttributePtr(attrName, n)
+ if attr == nil {
+ n.Attr = append(n.Attr, html.Attribute{Key: attrName, Val: val})
+ } else {
+ attr.Val = val
+ }
+ }
+
+ return s
+}
+
+// Text gets the combined text contents of each element in the set of matched
+// elements, including their descendants.
+func (s *Selection) Text() string {
+ var buf bytes.Buffer
+
+ // Slightly optimized vs calling Each: no single selection object created
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.TextNode {
+ // Keep newlines and spaces, like jQuery
+ buf.WriteString(n.Data)
+ }
+ if n.FirstChild != nil {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ }
+ for _, n := range s.Nodes {
+ f(n)
+ }
+
+ return buf.String()
+}
+
// Size is an alias for Length; it reports the number of elements in the
// Selection object.
func (s *Selection) Size() int {
	return s.Length()
}
+
// Length returns the number of elements in the Selection object.
// An empty (or zero-value) selection yields 0.
func (s *Selection) Length() int {
	return len(s.Nodes)
}
+
+// Html gets the HTML contents of the first element in the set of matched
+// elements. It includes text and comment nodes.
+func (s *Selection) Html() (ret string, e error) {
+ // Since there is no .innerHtml, the HTML content must be re-created from
+ // the nodes using html.Render.
+ var buf bytes.Buffer
+
+ if len(s.Nodes) > 0 {
+ for c := s.Nodes[0].FirstChild; c != nil; c = c.NextSibling {
+ e = html.Render(&buf, c)
+ if e != nil {
+ return
+ }
+ }
+ ret = buf.String()
+ }
+
+ return
+}
+
// AddClass adds the given class(es) to each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
func (s *Selection) AddClass(class ...string) *Selection {
	classStr := strings.TrimSpace(strings.Join(class, " "))

	if classStr == "" {
		return s
	}

	tcls := getClassesSlice(classStr)
	for _, n := range s.Nodes {
		// curClasses is space-padded (" a b "), so substring containment of
		// " name " is an exact whole-class match, preventing duplicates and
		// false prefix matches.
		curClasses, attr := getClassesAndAttr(n, true)
		for _, newClass := range tcls {
			if !strings.Contains(curClasses, " "+newClass+" ") {
				curClasses += newClass + " "
			}
		}

		setClasses(n, attr, curClasses)
	}

	return s
}
+
+// HasClass determines whether any of the matched elements are assigned the
+// given class.
+func (s *Selection) HasClass(class string) bool {
+ class = " " + class + " "
+ for _, n := range s.Nodes {
+ classes, _ := getClassesAndAttr(n, false)
+ if strings.Contains(classes, class) {
+ return true
+ }
+ }
+ return false
+}
+
// RemoveClass removes the given class(es) from each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
// If no class name is provided, all classes are removed.
func (s *Selection) RemoveClass(class ...string) *Selection {
	var rclasses []string

	classStr := strings.TrimSpace(strings.Join(class, " "))
	// No names given means "remove the whole class attribute".
	remove := classStr == ""

	if !remove {
		rclasses = getClassesSlice(classStr)
	}

	for _, n := range s.Nodes {
		if remove {
			removeAttr(n, "class")
		} else {
			// classes is space-padded (" a b "), so replacing " name " with
			// a single space removes exactly that class.
			classes, attr := getClassesAndAttr(n, true)
			for _, rcl := range rclasses {
				classes = strings.Replace(classes, " "+rcl+" ", " ", -1)
			}

			setClasses(n, attr, classes)
		}
	}

	return s
}
+
// ToggleClass adds or removes the given class(es) for each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
func (s *Selection) ToggleClass(class ...string) *Selection {
	classStr := strings.TrimSpace(strings.Join(class, " "))

	if classStr == "" {
		return s
	}

	tcls := getClassesSlice(classStr)

	for _, n := range s.Nodes {
		// classes is space-padded (" a b "): present classes are removed by
		// replacing " name " with a space, absent ones are appended.
		classes, attr := getClassesAndAttr(n, true)
		for _, tcl := range tcls {
			if strings.Contains(classes, " "+tcl+" ") {
				classes = strings.Replace(classes, " "+tcl+" ", " ", -1)
			} else {
				classes += tcl + " "
			}
		}

		setClasses(n, attr, classes)
	}

	return s
}
+
+func getAttributePtr(attrName string, n *html.Node) *html.Attribute {
+ if n == nil {
+ return nil
+ }
+
+ for i, a := range n.Attr {
+ if a.Key == attrName {
+ return &n.Attr[i]
+ }
+ }
+ return nil
+}
+
+// Private function to get the specified attribute's value from a node.
+func getAttributeValue(attrName string, n *html.Node) (val string, exists bool) {
+ if a := getAttributePtr(attrName, n); a != nil {
+ val = a.Val
+ exists = true
+ }
+ return
+}
+
// Get and normalize the "class" attribute from the node.
// The returned classes string is space-padded (" a b ") with tabs/CRs/LFs
// normalized to spaces, so whole-class checks can use substring containment.
// attr is only ever non-nil for element nodes; when create is true and the
// element has no class attribute, an empty one is appended first.
// NOTE(review): the returned attr pointer aliases n.Attr, so it becomes stale
// if n.Attr is re-appended before the pointer is used — confirm callers
// never do that between this call and setClasses.
func getClassesAndAttr(n *html.Node, create bool) (classes string, attr *html.Attribute) {
	// Applies only to element nodes
	if n.Type == html.ElementNode {
		attr = getAttributePtr("class", n)
		if attr == nil && create {
			n.Attr = append(n.Attr, html.Attribute{
				Key: "class",
				Val: "",
			})
			attr = &n.Attr[len(n.Attr)-1]
		}
	}

	if attr == nil {
		classes = " "
	} else {
		classes = rxClassTrim.ReplaceAllString(" "+attr.Val+" ", " ")
	}

	return
}
+
// getClassesSlice splits a class string on single spaces after normalizing
// tabs/CRs/LFs to spaces. Because the input is padded with spaces before the
// split, the result contains empty strings at the boundaries; callers trim
// the final class attribute, so these appear harmless — TODO confirm.
func getClassesSlice(classes string) []string {
	return strings.Split(rxClassTrim.ReplaceAllString(" "+classes+" ", " "), " ")
}
+
+func removeAttr(n *html.Node, attrName string) {
+ for i, a := range n.Attr {
+ if a.Key == attrName {
+ n.Attr[i], n.Attr[len(n.Attr)-1], n.Attr =
+ n.Attr[len(n.Attr)-1], html.Attribute{}, n.Attr[:len(n.Attr)-1]
+ return
+ }
+ }
+}
+
+func setClasses(n *html.Node, attr *html.Attribute, classes string) {
+ classes = strings.TrimSpace(classes)
+ if classes == "" {
+ removeAttr(n, "class")
+ return
+ }
+
+ attr.Val = classes
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/property_test.go b/vendor/github.com/PuerkitoBio/goquery/property_test.go
new file mode 100644
index 0000000..1095dcc
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/property_test.go
@@ -0,0 +1,252 @@
+package goquery
+
+import (
+ "regexp"
+ "strings"
+ "testing"
+)
+
// TestAttrExists verifies Attr reports a present attribute.
func TestAttrExists(t *testing.T) {
	if val, ok := Doc().Find("a").Attr("href"); !ok {
		t.Error("Expected a value for the href attribute.")
	} else {
		t.Logf("Href of first anchor: %v.", val)
	}
}

// TestAttrOr verifies the fallback value is used for a missing attribute and
// for an empty selection.
func TestAttrOr(t *testing.T) {
	if val := Doc().Find("a").AttrOr("fake-attribute", "alternative"); val != "alternative" {
		t.Error("Expected an alternative value for 'fake-attribute' attribute.")
	} else {
		t.Logf("Value returned for not existing attribute: %v.", val)
	}
	if val := Doc().Find("zz").AttrOr("fake-attribute", "alternative"); val != "alternative" {
		t.Error("Expected an alternative value for 'fake-attribute' on an empty selection.")
	} else {
		t.Logf("Value returned for empty selection: %v.", val)
	}
}

// TestAttrNotExist verifies Attr reports absence for a missing attribute.
func TestAttrNotExist(t *testing.T) {
	if val, ok := Doc().Find("div.row-fluid").Attr("href"); ok {
		t.Errorf("Expected no value for the href attribute, got %v.", val)
	}
}

// TestRemoveAttr verifies RemoveAttr strips the attribute from all matches.
func TestRemoveAttr(t *testing.T) {
	sel := Doc2Clone().Find("div")

	sel.RemoveAttr("id")

	_, ok := sel.Attr("id")
	if ok {
		t.Error("Expected there to be no id attributes set")
	}
}

// TestSetAttr verifies SetAttr overwrites an existing attribute.
func TestSetAttr(t *testing.T) {
	sel := Doc2Clone().Find("#main")

	sel.SetAttr("id", "not-main")

	val, ok := sel.Attr("id")
	if !ok {
		t.Error("Expected an id attribute on main")
	}

	if val != "not-main" {
		t.Errorf("Expected an attribute id to be not-main, got %s", val)
	}
}

// TestSetAttr2 verifies SetAttr creates a previously absent attribute.
func TestSetAttr2(t *testing.T) {
	sel := Doc2Clone().Find("#main")

	sel.SetAttr("foo", "bar")

	val, ok := sel.Attr("foo")
	if !ok {
		t.Error("Expected an 'foo' attribute on main")
	}

	if val != "bar" {
		t.Errorf("Expected an attribute 'foo' to be 'bar', got '%s'", val)
	}
}
+
// TestText verifies the combined text content of the h1 element.
func TestText(t *testing.T) {
	txt := Doc().Find("h1").Text()
	if strings.Trim(txt, " \n\r\t") != "Provok.in" {
		t.Errorf("Expected text to be Provok.in, found %s.", txt)
	}
}

// TestText2 verifies that Text preserves whitespace between descendants,
// matching jQuery behavior.
func TestText2(t *testing.T) {
	txt := Doc().Find(".hero-unit .container-fluid .row-fluid:nth-child(1)").Text()
	if ok, e := regexp.MatchString(`^\s+Provok\.in\s+Prove your point.\s+$`, txt); !ok || e != nil {
		t.Errorf("Expected text to be Provok.in Prove your point., found %s.", txt)
		if e != nil {
			t.Logf("Error: %s.", e.Error())
		}
	}
}

// TestText3 verifies that non-breaking spaces survive text extraction.
func TestText3(t *testing.T) {
	txt := Doc().Find(".pvk-gutter").First().Text()
	// There's an &nbsp; (U+00A0) character in there...
	if ok, e := regexp.MatchString(`^[\s\x{00A0}]+$`, txt); !ok || e != nil {
		t.Errorf("Expected spaces, found <%v>.", txt)
		if e != nil {
			t.Logf("Error: %s.", e.Error())
		}
	}
}

// TestHtml verifies Html renders the inner HTML of the first match.
func TestHtml(t *testing.T) {
	txt, e := Doc().Find("h1").Html()
	if e != nil {
		t.Errorf("Error: %s.", e)
	}

	if ok, e := regexp.MatchString(`^\s*Provok\.in\s*$`, txt); !ok || e != nil {
		t.Errorf("Unexpected HTML content, found %s.", txt)
		if e != nil {
			t.Logf("Error: %s.", e.Error())
		}
	}
}
+
+func TestNbsp(t *testing.T) {
+ src := `
Some text
`
+ d, err := NewDocumentFromReader(strings.NewReader(src))
+ if err != nil {
+ t.Fatal(err)
+ }
+ txt := d.Find("p").Text()
+ ix := strings.Index(txt, "\u00a0")
+ if ix != 4 {
+ t.Errorf("Text: expected a non-breaking space at index 4, got %d", ix)
+ }
+
+ h, err := d.Find("p").Html()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ix = strings.Index(h, "\u00a0")
+ if ix != 4 {
+ t.Errorf("Html: expected a non-breaking space at index 4, got %d", ix)
+ }
+}
+
// TestAddClass verifies that duplicate class names are only added once.
func TestAddClass(t *testing.T) {
	sel := Doc2Clone().Find("#main")
	sel.AddClass("main main main")

	// Make sure that class was only added once
	if a, ok := sel.Attr("class"); !ok || a != "main" {
		t.Error("Expected #main to have class main")
	}
}

// TestAddClassSimilar verifies that adding "odd" does not disturb the
// pre-existing "odder" class (whole-name matching, not prefix matching).
func TestAddClassSimilar(t *testing.T) {
	sel := Doc2Clone().Find("#nf5")
	sel.AddClass("odd")

	assertClass(t, sel, "odd")
	assertClass(t, sel, "odder")
	printSel(t, sel.Parent())
}

// TestAddEmptyClass verifies that adding an empty string creates no class
// attribute at all.
func TestAddEmptyClass(t *testing.T) {
	sel := Doc2Clone().Find("#main")
	sel.AddClass("")

	// Make sure no class attribute was created
	if a, ok := sel.Attr("class"); ok {
		t.Errorf("Expected #main to not to have a class, have: %s", a)
	}
}

// TestAddClasses verifies that a space-separated list adds each class.
func TestAddClasses(t *testing.T) {
	sel := Doc2Clone().Find("#main")
	sel.AddClass("a b")

	// Make sure both classes were added
	if !sel.HasClass("a") || !sel.HasClass("b") {
		t.Errorf("#main does not have classes")
	}
}

// TestHasClass verifies HasClass matches any element in the selection.
func TestHasClass(t *testing.T) {
	sel := Doc().Find("div")
	if !sel.HasClass("span12") {
		t.Error("Expected at least one div to have class span12.")
	}
}

// TestHasClassNone verifies HasClass is false when no element has the class.
func TestHasClassNone(t *testing.T) {
	sel := Doc().Find("h2")
	// NOTE(review): the message says h1 but the selection is h2 — the
	// message text is intentionally left untouched here.
	if sel.HasClass("toto") {
		t.Error("Expected h1 to have no class.")
	}
}

// TestHasClassNotFirst verifies HasClass checks beyond the first element.
func TestHasClassNotFirst(t *testing.T) {
	sel := Doc().Find(".alert")
	if !sel.HasClass("alert-error") {
		t.Error("Expected .alert to also have class .alert-error.")
	}
}
+
// TestRemoveClass verifies that removing multiple classes leaves the rest.
func TestRemoveClass(t *testing.T) {
	sel := Doc2Clone().Find("#nf1")
	sel.RemoveClass("one row")

	if !sel.HasClass("even") || sel.HasClass("one") || sel.HasClass("row") {
		classes, _ := sel.Attr("class")
		t.Error("Expected #nf1 to have class even, has ", classes)
	}
}

// TestRemoveClassSimilar verifies that removing "odd" leaves "odder" intact
// (whole-name matching, not prefix matching).
func TestRemoveClassSimilar(t *testing.T) {
	sel := Doc2Clone().Find("#nf5, #nf6")
	assertLength(t, sel.Nodes, 2)

	sel.RemoveClass("odd")
	assertClass(t, sel.Eq(0), "odder")
	printSel(t, sel)
}

// TestRemoveAllClasses verifies that RemoveClass() with no arguments drops
// the class attribute entirely.
func TestRemoveAllClasses(t *testing.T) {
	sel := Doc2Clone().Find("#nf1")
	sel.RemoveClass()

	if a, ok := sel.Attr("class"); ok {
		t.Error("All classes were not removed, has ", a)
	}

	sel = Doc2Clone().Find("#main")
	sel.RemoveClass()
	if a, ok := sel.Attr("class"); ok {
		t.Error("All classes were not removed, has ", a)
	}
}

// TestToggleClass verifies toggling removes a present class, adds an absent
// one, and that toggling every class away removes the attribute entirely.
func TestToggleClass(t *testing.T) {
	sel := Doc2Clone().Find("#nf1")

	sel.ToggleClass("one")
	if sel.HasClass("one") {
		t.Error("Expected #nf1 to not have class one")
	}

	sel.ToggleClass("one")
	if !sel.HasClass("one") {
		t.Error("Expected #nf1 to have class one")
	}

	sel.ToggleClass("one even row")
	if a, ok := sel.Attr("class"); ok {
		t.Errorf("Expected #nf1 to have no classes, have %q", a)
	}
}
diff --git a/vendor/github.com/PuerkitoBio/goquery/query.go b/vendor/github.com/PuerkitoBio/goquery/query.go
new file mode 100644
index 0000000..fe86bf0
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/query.go
@@ -0,0 +1,49 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Is checks the current matched set of elements against a selector and
+// returns true if at least one of these elements matches.
+func (s *Selection) Is(selector string) bool {
+ return s.IsMatcher(compileMatcher(selector))
+}
+
+// IsMatcher checks the current matched set of elements against a matcher and
+// returns true if at least one of these elements matches.
+func (s *Selection) IsMatcher(m Matcher) bool {
+ if len(s.Nodes) > 0 {
+ if len(s.Nodes) == 1 {
+ return m.Match(s.Nodes[0])
+ }
+ return len(m.Filter(s.Nodes)) > 0
+ }
+
+ return false
+}
+
+// IsFunction checks the current matched set of elements against a predicate and
+// returns true if at least one of these elements matches.
+func (s *Selection) IsFunction(f func(int, *Selection) bool) bool {
+ return s.FilterFunction(f).Length() > 0
+}
+
+// IsSelection checks the current matched set of elements against a Selection object
+// and returns true if at least one of these elements matches.
+func (s *Selection) IsSelection(sel *Selection) bool {
+ return s.FilterSelection(sel).Length() > 0
+}
+
+// IsNodes checks the current matched set of elements against the specified nodes
+// and returns true if at least one of these elements matches.
+func (s *Selection) IsNodes(nodes ...*html.Node) bool {
+ return s.FilterNodes(nodes...).Length() > 0
+}
+
// Contains returns true if the specified Node is within,
// at any depth, one of the nodes in the Selection object.
// It is NOT inclusive, to behave like jQuery's implementation, and
// unlike Javascript's .contains, so if the contained
// node is itself in the selection, it returns false.
func (s *Selection) Contains(n *html.Node) bool {
	// Delegates entirely to the sliceContains helper.
	return sliceContains(s.Nodes, n)
}
diff --git a/vendor/github.com/PuerkitoBio/goquery/query_test.go b/vendor/github.com/PuerkitoBio/goquery/query_test.go
new file mode 100644
index 0000000..54b2a2e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/query_test.go
@@ -0,0 +1,103 @@
+package goquery
+
+import (
+ "testing"
+)
+
+func TestIs(t *testing.T) {
+ sel := Doc().Find(".footer p:nth-child(1)")
+ if !sel.Is("p") {
+ t.Error("Expected .footer p:nth-child(1) to be p.")
+ }
+}
+
+func TestIsInvalid(t *testing.T) {
+ sel := Doc().Find(".footer p:nth-child(1)")
+ if sel.Is("") {
+ t.Error("Is should not succeed with invalid selector string")
+ }
+}
+
+func TestIsPositional(t *testing.T) {
+ sel := Doc().Find(".footer p:nth-child(2)")
+ if !sel.Is("p:nth-child(2)") {
+ t.Error("Expected .footer p:nth-child(2) to be p:nth-child(2).")
+ }
+}
+
+func TestIsPositionalNot(t *testing.T) {
+ sel := Doc().Find(".footer p:nth-child(1)")
+ if sel.Is("p:nth-child(2)") {
+ t.Error("Expected .footer p:nth-child(1) NOT to be p:nth-child(2).")
+ }
+}
+
+func TestIsFunction(t *testing.T) {
+ ok := Doc().Find("div").IsFunction(func(i int, s *Selection) bool {
+ return s.HasClass("container-fluid")
+ })
+
+ if !ok {
+ t.Error("Expected some div to have a container-fluid class.")
+ }
+}
+
+// NOTE(review): this test body is byte-identical to TestIsFunction and does
+// not exercise any rollback (End) behavior despite its name — confirm the
+// intended coverage.
+func TestIsFunctionRollback(t *testing.T) {
+ ok := Doc().Find("div").IsFunction(func(i int, s *Selection) bool {
+ return s.HasClass("container-fluid")
+ })
+
+ if !ok {
+ t.Error("Expected some div to have a container-fluid class.")
+ }
+}
+
+func TestIsSelection(t *testing.T) {
+ sel := Doc().Find("div")
+ sel2 := Doc().Find(".pvk-gutter")
+
+ if !sel.IsSelection(sel2) {
+ t.Error("Expected some div to have a pvk-gutter class.")
+ }
+}
+
+func TestIsSelectionNot(t *testing.T) {
+ sel := Doc().Find("div")
+ sel2 := Doc().Find("a")
+
+ if sel.IsSelection(sel2) {
+ t.Error("Expected some div NOT to be an anchor.")
+ }
+}
+
+func TestIsNodes(t *testing.T) {
+ sel := Doc().Find("div")
+ sel2 := Doc().Find(".footer")
+
+ if !sel.IsNodes(sel2.Nodes[0]) {
+ t.Error("Expected some div to have a footer class.")
+ }
+}
+
+func TestDocContains(t *testing.T) {
+ sel := Doc().Find("h1")
+ if !Doc().Contains(sel.Nodes[0]) {
+ t.Error("Expected document to contain H1 tag.")
+ }
+}
+
+func TestSelContains(t *testing.T) {
+ sel := Doc().Find(".row-fluid")
+ sel2 := Doc().Find("a[ng-click]")
+ if !sel.Contains(sel2.Nodes[0]) {
+ t.Error("Expected .row-fluid to contain a[ng-click] tag.")
+ }
+}
+
+func TestSelNotContains(t *testing.T) {
+ sel := Doc().Find("a.link")
+ sel2 := Doc().Find("span")
+ if sel.Contains(sel2.Nodes[0]) {
+ t.Error("Expected a.link to NOT contain span tag.")
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/traversal.go b/vendor/github.com/PuerkitoBio/goquery/traversal.go
new file mode 100644
index 0000000..5fa5315
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/traversal.go
@@ -0,0 +1,698 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+type siblingType int
+
+// Sibling type, used internally when iterating over children at the same
+// level (siblings) to specify which nodes are requested.
+const (
+ // The iota offset anchors siblingAll at 0: the Prev variants are
+ // negative and the Next variants positive.
+ siblingPrevUntil siblingType = iota - 3
+ siblingPrevAll
+ siblingPrev
+ siblingAll
+ siblingNext
+ siblingNextAll
+ siblingNextUntil
+ siblingAllIncludingNonElements
+)
+
+// Find gets the descendants of each element in the current set of matched
+// elements, filtered by a selector. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) Find(selector string) *Selection {
+ return pushStack(s, findWithMatcher(s.Nodes, compileMatcher(selector)))
+}
+
+// FindMatcher gets the descendants of each element in the current set of matched
+// elements, filtered by the matcher. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindMatcher(m Matcher) *Selection {
+ return pushStack(s, findWithMatcher(s.Nodes, m))
+}
+
+// FindSelection gets the descendants of each element in the current
+// Selection, filtered by a Selection. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, nil)
+ }
+ return s.FindNodes(sel.Nodes...)
+}
+
+// FindNodes gets the descendants of each element in the current
+// Selection, filtered by some nodes. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ // Keep n only if it is contained, at any depth, in one of the
+ // Selection's nodes (sliceContains is non-inclusive, see Contains).
+ if sliceContains(s.Nodes, n) {
+ return []*html.Node{n}
+ }
+ return nil
+ }))
+}
+
+// Contents gets the children of each element in the Selection,
+// including text and comment nodes. It returns a new Selection object
+// containing these elements.
+func (s *Selection) Contents() *Selection {
+ return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
+}
+
+// ContentsFiltered gets the children of each element in the Selection,
+// filtered by the specified selector. It returns a new Selection
+// object containing these elements. Since selectors only act on Element nodes,
+// this function is an alias to ChildrenFiltered unless the selector is empty,
+// in which case it is an alias to Contents.
+func (s *Selection) ContentsFiltered(selector string) *Selection {
+ if selector != "" {
+ return s.ChildrenFiltered(selector)
+ }
+ return s.Contents()
+}
+
+// ContentsMatcher gets the children of each element in the Selection,
+// filtered by the specified matcher. It returns a new Selection
+// object containing these elements. Since matchers only act on Element nodes,
+// this function is an alias to ChildrenMatcher.
+func (s *Selection) ContentsMatcher(m Matcher) *Selection {
+ return s.ChildrenMatcher(m)
+}
+
+// Children gets the child elements of each element in the Selection.
+// It returns a new Selection object containing these elements.
+func (s *Selection) Children() *Selection {
+ return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
+}
+
+// ChildrenFiltered gets the child elements of each element in the Selection,
+// filtered by the specified selector. It returns a new
+// Selection object containing these elements.
+func (s *Selection) ChildrenFiltered(selector string) *Selection {
+ return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), compileMatcher(selector))
+}
+
+// ChildrenMatcher gets the child elements of each element in the Selection,
+// filtered by the specified matcher. It returns a new
+// Selection object containing these elements.
+func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
+}
+
+// Parent gets the parent of each element in the Selection. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) Parent() *Selection {
+ return pushStack(s, getParentNodes(s.Nodes))
+}
+
+// ParentFiltered gets the parent of each element in the Selection filtered by a
+// selector. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentFiltered(selector string) *Selection {
+ return filterAndPush(s, getParentNodes(s.Nodes), compileMatcher(selector))
+}
+
+// ParentMatcher gets the parent of each element in the Selection filtered by a
+// matcher. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getParentNodes(s.Nodes), m)
+}
+
+// Closest gets the first element that matches the selector by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) Closest(selector string) *Selection {
+ cs := compileMatcher(selector)
+ return s.ClosestMatcher(cs)
+}
+
+// ClosestMatcher gets the first element that matches the matcher by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) ClosestMatcher(m Matcher) *Selection {
+ return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
+ // For each node in the selection, test the node itself, then each parent
+ // until a match is found.
+ for ; n != nil; n = n.Parent {
+ if m.Match(n) {
+ return []*html.Node{n}
+ }
+ }
+ return nil
+ }))
+}
+
+// ClosestNodes gets the first element that matches one of the nodes by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
+ // Index the candidate nodes so each ancestor check is a constant-time
+ // map lookup instead of a slice scan.
+ set := make(map[*html.Node]bool)
+ for _, n := range nodes {
+ set[n] = true
+ }
+ return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
+ // For each node in the selection, test the node itself, then each parent
+ // until a match is found.
+ for ; n != nil; n = n.Parent {
+ if set[n] {
+ return []*html.Node{n}
+ }
+ }
+ return nil
+ }))
+}
+
+// ClosestSelection gets the first element that matches one of the nodes in the
+// Selection by testing the element itself and traversing up through its ancestors
+// in the DOM tree.
+func (s *Selection) ClosestSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, nil)
+ }
+ return s.ClosestNodes(sel.Nodes...)
+}
+
+// Parents gets the ancestors of each element in the current Selection. It
+// returns a new Selection object with the matched elements.
+func (s *Selection) Parents() *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
+}
+
+// ParentsFiltered gets the ancestors of each element in the current
+// Selection filtered by a selector. It returns a new Selection object with
+// the matched elements.
+func (s *Selection) ParentsFiltered(selector string) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), compileMatcher(selector))
+}
+
+// ParentsMatcher gets the ancestors of each element in the current
+// Selection filtered by a matcher. It returns a new Selection object with
+// the matched elements.
+func (s *Selection) ParentsMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
+}
+
+// ParentsUntil gets the ancestors of each element in the Selection, up to but
+// not including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsUntil(selector string) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, compileMatcher(selector), nil))
+}
+
+// ParentsUntilMatcher gets the ancestors of each element in the Selection, up to but
+// not including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, m, nil))
+}
+
+// ParentsUntilSelection gets the ancestors of each element in the Selection,
+// up to but not including the elements in the specified Selection. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.Parents()
+ }
+ return s.ParentsUntilNodes(sel.Nodes...)
+}
+
+// ParentsUntilNodes gets the ancestors of each element in the Selection,
+// up to but not including the specified nodes. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
+}
+
+// ParentsFilteredUntil is like ParentsUntil, with the option to filter the
+// results based on a selector string. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// ParentsFilteredUntilMatcher is like ParentsUntilMatcher, with the option to filter the
+// results based on a matcher. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
+}
+
+// ParentsFilteredUntilSelection is like ParentsUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.ParentsMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// ParentsMatcherUntilSelection is like ParentsUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ // No until condition, filter all ancestors.
+ return s.ParentsMatcher(filter)
+ }
+ return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// ParentsFilteredUntilNodes is like ParentsUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), compileMatcher(filterSelector))
+}
+
+// ParentsMatcherUntilNodes is like ParentsUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
+}
+
+// Siblings gets the siblings of each element in the Selection. It returns
+// a new Selection object containing the matched elements.
+func (s *Selection) Siblings() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
+}
+
+// SiblingsFiltered gets the siblings of each element in the Selection
+// filtered by a selector. It returns a new Selection object containing the
+// matched elements.
+func (s *Selection) SiblingsFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), compileMatcher(selector))
+}
+
+// SiblingsMatcher gets the siblings of each element in the Selection
+// filtered by a matcher. It returns a new Selection object containing the
+// matched elements.
+func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
+}
+
+// Next gets the immediately following sibling of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+// At most one node is returned for each element in the original Selection.
+func (s *Selection) Next() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
+}
+
+// NextFiltered gets the immediately following sibling of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), compileMatcher(selector))
+}
+
+// NextMatcher gets the immediately following sibling of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
+}
+
+// NextAll gets all the following siblings of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) NextAll() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
+}
+
+// NextAllFiltered gets all the following siblings of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextAllFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), compileMatcher(selector))
+}
+
+// NextAllMatcher gets all the following siblings of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextAllMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
+}
+
+// Prev gets the immediately preceding sibling of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+// At most one node is returned for each element in the original Selection.
+func (s *Selection) Prev() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
+}
+
+// PrevFiltered gets the immediately preceding sibling of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), compileMatcher(selector))
+}
+
+// PrevMatcher gets the immediately preceding sibling of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
+}
+
+// PrevAll gets all the preceding siblings of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevAll() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
+}
+
+// PrevAllFiltered gets all the preceding siblings of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevAllFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), compileMatcher(selector))
+}
+
+// PrevAllMatcher gets all the preceding siblings of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
+}
+
+// NextUntil gets all following siblings of each element up to but not
+// including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntil(selector string) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ compileMatcher(selector), nil))
+}
+
+// NextUntilMatcher gets all following siblings of each element up to but not
+// including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ m, nil))
+}
+
+// NextUntilSelection gets all following siblings of each element up to but not
+// including the element matched by the Selection. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ // No until condition, match all following siblings.
+ return s.NextAll()
+ }
+ return s.NextUntilNodes(sel.Nodes...)
+}
+
+// NextUntilNodes gets all following siblings of each element up to but not
+// including the element matched by the nodes. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes))
+}
+
+// PrevUntil gets all preceding siblings of each element up to but not
+// including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntil(selector string) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ compileMatcher(selector), nil))
+}
+
+// PrevUntilMatcher gets all preceding siblings of each element up to but not
+// including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ m, nil))
+}
+
+// PrevUntilSelection gets all preceding siblings of each element up to but not
+// including the element matched by the Selection. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ // No until condition, match all preceding siblings.
+ return s.PrevAll()
+ }
+ return s.PrevUntilNodes(sel.Nodes...)
+}
+
+// PrevUntilNodes gets all preceding siblings of each element up to but not
+// including the element matched by the nodes. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes))
+}
+
+// NextFilteredUntil is like NextUntil, with the option to filter
+// the results based on a selector string.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// NextFilteredUntilMatcher is like NextUntilMatcher, with the option to filter
+// the results based on a matcher.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ until, nil), filter)
+}
+
+// NextFilteredUntilSelection is like NextUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.NextMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// NextMatcherUntilSelection is like NextUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ // No until condition, filter all following siblings.
+ return s.NextMatcher(filter)
+ }
+ return s.NextMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// NextFilteredUntilNodes is like NextUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes), compileMatcher(filterSelector))
+}
+
+// NextMatcherUntilNodes is like NextUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes), filter)
+}
+
+// PrevFilteredUntil is like PrevUntil, with the option to filter
+// the results based on a selector string.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// PrevFilteredUntilMatcher is like PrevUntilMatcher, with the option to filter
+// the results based on a matcher.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ until, nil), filter)
+}
+
+// PrevFilteredUntilSelection is like PrevUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.PrevMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// PrevMatcherUntilSelection is like PrevUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ // No until condition, filter all preceding siblings.
+ return s.PrevMatcher(filter)
+ }
+ return s.PrevMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// PrevFilteredUntilNodes is like PrevUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes), compileMatcher(filterSelector))
+}
+
+// PrevMatcherUntilNodes is like PrevUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes), filter)
+}
+
+// filterAndPush filters the nodes based on a matcher, and pushes the results
+// on the stack, with the srcSel as previous selection.
+func filterAndPush(srcSel *Selection, nodes []*html.Node, m Matcher) *Selection {
+ // Create a temporary Selection with the specified nodes to filter using winnow
+ sel := &Selection{nodes, srcSel.document, nil}
+ // Filter based on matcher and push on stack
+ return pushStack(srcSel, winnow(sel, m, true))
+}
+
+// findWithMatcher is the internal implementation of Find that returns raw nodes.
+func findWithMatcher(nodes []*html.Node, m Matcher) []*html.Node {
+ // Map nodes to find the matches within the children of each node
+ return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
+ // Go down one level, because jQuery's Find selects only within descendants
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == html.ElementNode {
+ result = append(result, m.MatchAll(c)...)
+ }
+ }
+ return
+ })
+}
+
+// getParentsNodes is the internal implementation to get all parent nodes,
+// stopping at the specified matcher or nodes (no stop condition if both are
+// nil/empty). The stop element itself is excluded from the results.
+func getParentsNodes(nodes []*html.Node, stopm Matcher, stopNodes []*html.Node) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
+ for p := n.Parent; p != nil; p = p.Parent {
+ sel := newSingleSelection(p, nil)
+ // The matcher-based stop condition takes precedence over the
+ // nodes-based one.
+ if stopm != nil {
+ if sel.IsMatcher(stopm) {
+ break
+ }
+ } else if len(stopNodes) > 0 {
+ if sel.IsNodes(stopNodes...) {
+ break
+ }
+ }
+ if p.Type == html.ElementNode {
+ result = append(result, p)
+ }
+ }
+ return
+ })
+}
+
+// getSiblingNodes is the internal implementation of sibling nodes that
+// returns a raw slice of matches.
+func getSiblingNodes(nodes []*html.Node, st siblingType, untilm Matcher, untilNodes []*html.Node) []*html.Node {
+ var f func(*html.Node) bool
+
+ // If the requested siblings are ...Until, create the test function to
+ // determine if the until condition is reached (returns true if it is)
+ if st == siblingNextUntil || st == siblingPrevUntil {
+ f = func(n *html.Node) bool {
+ if untilm != nil {
+ // Matcher-based condition
+ sel := newSingleSelection(n, nil)
+ return sel.IsMatcher(untilm)
+ } else if len(untilNodes) > 0 {
+ // Nodes-based condition
+ sel := newSingleSelection(n, nil)
+ return sel.IsNodes(untilNodes...)
+ }
+ return false
+ }
+ }
+
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ return getChildrenWithSiblingType(n.Parent, st, n, f)
+ })
+}
+
+// getChildrenNodes gets the children nodes of each node in the specified
+// slice of nodes, based on the sibling type request.
+func getChildrenNodes(nodes []*html.Node, st siblingType) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ return getChildrenWithSiblingType(n, st, nil, nil)
+ })
+}
+
+// getChildrenWithSiblingType gets the children of the specified parent, based
+// on the requested sibling type, skipping a specified node if required.
+func getChildrenWithSiblingType(parent *html.Node, st siblingType, skipNode *html.Node,
+ untilFunc func(*html.Node) bool) (result []*html.Node) {
+
+ // Create the iterator function: given the current node (nil on the first
+ // call), it returns the next node to visit, or nil when iteration is done.
+ var iter = func(cur *html.Node) (ret *html.Node) {
+ // Based on the sibling type requested, iterate the right way
+ for {
+ switch st {
+ case siblingAll, siblingAllIncludingNonElements:
+ if cur == nil {
+ // First iteration, start with first child of parent
+ // Skip node if required
+ if ret = parent.FirstChild; ret == skipNode && skipNode != nil {
+ ret = skipNode.NextSibling
+ }
+ } else {
+ // Skip node if required
+ if ret = cur.NextSibling; ret == skipNode && skipNode != nil {
+ ret = skipNode.NextSibling
+ }
+ }
+ case siblingPrev, siblingPrevAll, siblingPrevUntil:
+ if cur == nil {
+ // Start with previous sibling of the skip node
+ ret = skipNode.PrevSibling
+ } else {
+ ret = cur.PrevSibling
+ }
+ case siblingNext, siblingNextAll, siblingNextUntil:
+ if cur == nil {
+ // Start with next sibling of the skip node
+ ret = skipNode.NextSibling
+ } else {
+ ret = cur.NextSibling
+ }
+ default:
+ panic("Invalid sibling type.")
+ }
+ // Stop on the end of the list, on an element node, or on any node
+ // at all when non-element nodes are requested too.
+ if ret == nil || ret.Type == html.ElementNode || st == siblingAllIncludingNonElements {
+ return
+ }
+ // Not a valid node, try again from this one
+ cur = ret
+ }
+ }
+
+ for c := iter(nil); c != nil; c = iter(c) {
+ // If this is an ...Until case, test before append (returns true
+ // if the until condition is reached)
+ if st == siblingNextUntil || st == siblingPrevUntil {
+ if untilFunc(c) {
+ return
+ }
+ }
+ result = append(result, c)
+ if st == siblingNext || st == siblingPrev {
+ // Only one node was requested (immediate next or previous), so exit
+ return
+ }
+ }
+ return
+}
+
+// getParentNodes is the internal implementation of parent nodes that returns
+// a raw slice of Nodes. Non-element parents are excluded.
+func getParentNodes(nodes []*html.Node) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ if n.Parent != nil && n.Parent.Type == html.ElementNode {
+ return []*html.Node{n.Parent}
+ }
+ return nil
+ })
+}
+
+// mapNodes is the internal map function used by many traversing methods. It
+// takes the source nodes to iterate on and the mapping function that returns
+// an array of nodes. It returns an array of nodes mapped by calling the
+// callback function once for each node in the source nodes; the set tracks
+// nodes already appended so duplicates across calls are dropped.
+func mapNodes(nodes []*html.Node, f func(int, *html.Node) []*html.Node) (result []*html.Node) {
+ set := make(map[*html.Node]bool)
+ for i, n := range nodes {
+ if vals := f(i, n); len(vals) > 0 {
+ result = appendWithoutDuplicates(result, vals, set)
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/traversal_test.go b/vendor/github.com/PuerkitoBio/goquery/traversal_test.go
new file mode 100644
index 0000000..04383a4
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/traversal_test.go
@@ -0,0 +1,793 @@
+package goquery
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestFind(t *testing.T) {
+ sel := Doc().Find("div.row-fluid")
+ assertLength(t, sel.Nodes, 9)
+}
+
+func TestFindRollback(t *testing.T) {
+ sel := Doc().Find("div.row-fluid")
+ sel2 := sel.Find("a").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestFindNotSelf(t *testing.T) {
+ sel := Doc().Find("h1").Find("h1")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestFindInvalid(t *testing.T) {
+ sel := Doc().Find(":+ ^")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestFindBig(t *testing.T) {
+ doc := DocW()
+ sel := doc.Find("li")
+ assertLength(t, sel.Nodes, 373)
+ sel2 := doc.Find("span")
+ assertLength(t, sel2.Nodes, 448)
+ sel3 := sel.FindSelection(sel2)
+ assertLength(t, sel3.Nodes, 248)
+}
+
+func TestChainedFind(t *testing.T) {
+ sel := Doc().Find("div.hero-unit").Find(".row-fluid")
+ assertLength(t, sel.Nodes, 4)
+}
+
+func TestChainedFindInvalid(t *testing.T) {
+ sel := Doc().Find("div.hero-unit").Find("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestChildren(t *testing.T) {
+ sel := Doc().Find(".pvk-content").Children()
+ assertLength(t, sel.Nodes, 5)
+}
+
+func TestChildrenRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Children().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestContents(t *testing.T) {
+ sel := Doc().Find(".pvk-content").Contents()
+ assertLength(t, sel.Nodes, 13)
+}
+
+func TestContentsRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.Contents().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestChildrenFiltered(t *testing.T) {
+ sel := Doc().Find(".pvk-content").ChildrenFiltered(".hero-unit")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestChildrenFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".pvk-content").ChildrenFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestChildrenFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.ChildrenFiltered(".hero-unit").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestContentsFiltered(t *testing.T) {
+ sel := Doc().Find(".pvk-content").ContentsFiltered(".hero-unit")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestContentsFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".pvk-content").ContentsFiltered("~")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestContentsFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-content")
+ sel2 := sel.ContentsFiltered(".hero-unit").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestChildrenFilteredNone(t *testing.T) {
+ sel := Doc().Find(".pvk-content").ChildrenFiltered("a.btn")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestParent(t *testing.T) {
+ sel := Doc().Find(".container-fluid").Parent()
+ assertLength(t, sel.Nodes, 3)
+}
+
+func TestParentRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.Parent().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentBody(t *testing.T) {
+ sel := Doc().Find("body").Parent()
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestParentFiltered(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentFiltered(".hero-unit")
+ assertLength(t, sel.Nodes, 1)
+ assertClass(t, sel, "hero-unit")
+}
+
+func TestParentFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestParentFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ParentFiltered(".hero-unit").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParents(t *testing.T) {
+ sel := Doc().Find(".container-fluid").Parents()
+ assertLength(t, sel.Nodes, 8)
+}
+
+func TestParentsOrder(t *testing.T) {
+ sel := Doc().Find("#cf2").Parents()
+ assertLength(t, sel.Nodes, 6)
+ assertSelectionIs(t, sel, ".hero-unit", ".pvk-content", "div.row-fluid", "#cf1", "body", "html")
+}
+
+func TestParentsRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.Parents().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsFiltered(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentsFiltered("body")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestParentsFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentsFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestParentsFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ParentsFiltered("body").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsUntil(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentsUntil("body")
+ assertLength(t, sel.Nodes, 6)
+}
+
+func TestParentsUntilInvalid(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentsUntil("")
+ assertLength(t, sel.Nodes, 8)
+}
+
+func TestParentsUntilRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ParentsUntil("body").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsUntilSelection(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".pvk-content")
+ sel = sel.ParentsUntilSelection(sel2)
+ assertLength(t, sel.Nodes, 3)
+}
+
+func TestParentsUntilSelectionRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".pvk-content")
+ sel2 = sel.ParentsUntilSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsUntilNodes(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".pvk-content, .hero-unit")
+ sel = sel.ParentsUntilNodes(sel2.Nodes...)
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestParentsUntilNodesRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".pvk-content, .hero-unit")
+ sel2 = sel.ParentsUntilNodes(sel2.Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsFilteredUntil(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentsFilteredUntil(".pvk-content", "body")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestParentsFilteredUntilInvalid(t *testing.T) {
+ sel := Doc().Find(".container-fluid").ParentsFilteredUntil("", "")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestParentsFilteredUntilRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ParentsFilteredUntil(".pvk-content", "body").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsFilteredUntilSelection(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".row-fluid")
+ sel = sel.ParentsFilteredUntilSelection("div", sel2)
+ assertLength(t, sel.Nodes, 3)
+}
+
+func TestParentsFilteredUntilSelectionRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".row-fluid")
+ sel2 = sel.ParentsFilteredUntilSelection("div", sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestParentsFilteredUntilNodes(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".row-fluid")
+ sel = sel.ParentsFilteredUntilNodes("body", sel2.Nodes...)
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestParentsFilteredUntilNodesRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := Doc().Find(".row-fluid")
+ sel2 = sel.ParentsFilteredUntilNodes("body", sel2.Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestSiblings(t *testing.T) {
+ sel := Doc().Find("h1").Siblings()
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestSiblingsRollback(t *testing.T) {
+ sel := Doc().Find("h1")
+ sel2 := sel.Siblings().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestSiblings2(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter").Siblings()
+ assertLength(t, sel.Nodes, 9)
+}
+
+func TestSiblings3(t *testing.T) {
+ sel := Doc().Find("body>.container-fluid").Siblings()
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestSiblingsFiltered(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter").SiblingsFiltered(".pvk-content")
+ assertLength(t, sel.Nodes, 3)
+}
+
+func TestSiblingsFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter").SiblingsFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestSiblingsFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter")
+ sel2 := sel.SiblingsFiltered(".pvk-content").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNext(t *testing.T) {
+ sel := Doc().Find("h1").Next()
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestNextRollback(t *testing.T) {
+ sel := Doc().Find("h1")
+ sel2 := sel.Next().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNext2(t *testing.T) {
+ sel := Doc().Find(".close").Next()
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestNextNone(t *testing.T) {
+ sel := Doc().Find("small").Next()
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestNextFiltered(t *testing.T) {
+ sel := Doc().Find(".container-fluid").NextFiltered("div")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestNextFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".container-fluid").NextFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestNextFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.NextFiltered("div").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextFiltered2(t *testing.T) {
+ sel := Doc().Find(".container-fluid").NextFiltered("[ng-view]")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestPrev(t *testing.T) {
+ sel := Doc().Find(".red").Prev()
+ assertLength(t, sel.Nodes, 1)
+ assertClass(t, sel, "green")
+}
+
+func TestPrevRollback(t *testing.T) {
+ sel := Doc().Find(".red")
+ sel2 := sel.Prev().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestPrev2(t *testing.T) {
+ sel := Doc().Find(".row-fluid").Prev()
+ assertLength(t, sel.Nodes, 5)
+}
+
+func TestPrevNone(t *testing.T) {
+ sel := Doc().Find("h2").Prev()
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestPrevFiltered(t *testing.T) {
+ sel := Doc().Find(".row-fluid").PrevFiltered(".row-fluid")
+ assertLength(t, sel.Nodes, 5)
+}
+
+func TestPrevFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".row-fluid").PrevFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestPrevFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".row-fluid")
+ sel2 := sel.PrevFiltered(".row-fluid").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextAll(t *testing.T) {
+ sel := Doc().Find("#cf2 div:nth-child(1)").NextAll()
+ assertLength(t, sel.Nodes, 3)
+}
+
+func TestNextAllRollback(t *testing.T) {
+ sel := Doc().Find("#cf2 div:nth-child(1)")
+ sel2 := sel.NextAll().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextAll2(t *testing.T) {
+ sel := Doc().Find("div[ng-cloak]").NextAll()
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestNextAllNone(t *testing.T) {
+ sel := Doc().Find(".footer").NextAll()
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestNextAllFiltered(t *testing.T) {
+ sel := Doc().Find("#cf2 .row-fluid").NextAllFiltered("[ng-cloak]")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestNextAllFilteredInvalid(t *testing.T) {
+ sel := Doc().Find("#cf2 .row-fluid").NextAllFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestNextAllFilteredRollback(t *testing.T) {
+ sel := Doc().Find("#cf2 .row-fluid")
+ sel2 := sel.NextAllFiltered("[ng-cloak]").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextAllFiltered2(t *testing.T) {
+ sel := Doc().Find(".close").NextAllFiltered("h4")
+ assertLength(t, sel.Nodes, 1)
+}
+
+func TestPrevAll(t *testing.T) {
+ sel := Doc().Find("[ng-view]").PrevAll()
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestPrevAllOrder(t *testing.T) {
+ sel := Doc().Find("[ng-view]").PrevAll()
+ assertLength(t, sel.Nodes, 2)
+ assertSelectionIs(t, sel, "#cf4", "#cf3")
+}
+
+func TestPrevAllRollback(t *testing.T) {
+ sel := Doc().Find("[ng-view]")
+ sel2 := sel.PrevAll().End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestPrevAll2(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter").PrevAll()
+ assertLength(t, sel.Nodes, 6)
+}
+
+func TestPrevAllFiltered(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter").PrevAllFiltered(".pvk-content")
+ assertLength(t, sel.Nodes, 3)
+}
+
+func TestPrevAllFilteredInvalid(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter").PrevAllFiltered("")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestPrevAllFilteredRollback(t *testing.T) {
+ sel := Doc().Find(".pvk-gutter")
+ sel2 := sel.PrevAllFiltered(".pvk-content").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextUntil(t *testing.T) {
+ sel := Doc().Find(".alert a").NextUntil("p")
+ assertLength(t, sel.Nodes, 1)
+ assertSelectionIs(t, sel, "h4")
+}
+
+func TestNextUntilInvalid(t *testing.T) {
+ sel := Doc().Find(".alert a").NextUntil("")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestNextUntil2(t *testing.T) {
+ sel := Doc().Find("#cf2-1").NextUntil("[ng-cloak]")
+ assertLength(t, sel.Nodes, 1)
+ assertSelectionIs(t, sel, "#cf2-2")
+}
+
+func TestNextUntilOrder(t *testing.T) {
+ sel := Doc().Find("#cf2-1").NextUntil("#cf2-4")
+ assertLength(t, sel.Nodes, 2)
+ assertSelectionIs(t, sel, "#cf2-2", "#cf2-3")
+}
+
+func TestNextUntilRollback(t *testing.T) {
+ sel := Doc().Find("#cf2-1")
+ sel2 := sel.PrevUntil("#cf2-4").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextUntilSelection(t *testing.T) {
+ sel := Doc2().Find("#n2")
+ sel2 := Doc2().Find("#n4")
+ sel2 = sel.NextUntilSelection(sel2)
+ assertLength(t, sel2.Nodes, 1)
+ assertSelectionIs(t, sel2, "#n3")
+}
+
+func TestNextUntilSelectionRollback(t *testing.T) {
+ sel := Doc2().Find("#n2")
+ sel2 := Doc2().Find("#n4")
+ sel2 = sel.NextUntilSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextUntilNodes(t *testing.T) {
+ sel := Doc2().Find("#n2")
+ sel2 := Doc2().Find("#n5")
+ sel2 = sel.NextUntilNodes(sel2.Nodes...)
+ assertLength(t, sel2.Nodes, 2)
+ assertSelectionIs(t, sel2, "#n3", "#n4")
+}
+
+func TestNextUntilNodesRollback(t *testing.T) {
+ sel := Doc2().Find("#n2")
+ sel2 := Doc2().Find("#n5")
+ sel2 = sel.NextUntilNodes(sel2.Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestPrevUntil(t *testing.T) {
+ sel := Doc().Find(".alert p").PrevUntil("a")
+ assertLength(t, sel.Nodes, 1)
+ assertSelectionIs(t, sel, "h4")
+}
+
+func TestPrevUntilInvalid(t *testing.T) {
+ sel := Doc().Find(".alert p").PrevUntil("")
+ assertLength(t, sel.Nodes, 2)
+}
+
+func TestPrevUntil2(t *testing.T) {
+ sel := Doc().Find("[ng-cloak]").PrevUntil(":not([ng-cloak])")
+ assertLength(t, sel.Nodes, 1)
+ assertSelectionIs(t, sel, "[ng-cloak]")
+}
+
+func TestPrevUntilOrder(t *testing.T) {
+ sel := Doc().Find("#cf2-4").PrevUntil("#cf2-1")
+ assertLength(t, sel.Nodes, 2)
+ assertSelectionIs(t, sel, "#cf2-3", "#cf2-2")
+}
+
+func TestPrevUntilRollback(t *testing.T) {
+ sel := Doc().Find("#cf2-4")
+ sel2 := sel.PrevUntil("#cf2-1").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestPrevUntilSelection(t *testing.T) {
+ sel := Doc2().Find("#n4")
+ sel2 := Doc2().Find("#n2")
+ sel2 = sel.PrevUntilSelection(sel2)
+ assertLength(t, sel2.Nodes, 1)
+ assertSelectionIs(t, sel2, "#n3")
+}
+
+func TestPrevUntilSelectionRollback(t *testing.T) {
+ sel := Doc2().Find("#n4")
+ sel2 := Doc2().Find("#n2")
+ sel2 = sel.PrevUntilSelection(sel2).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestPrevUntilNodes(t *testing.T) {
+ sel := Doc2().Find("#n5")
+ sel2 := Doc2().Find("#n2")
+ sel2 = sel.PrevUntilNodes(sel2.Nodes...)
+ assertLength(t, sel2.Nodes, 2)
+ assertSelectionIs(t, sel2, "#n4", "#n3")
+}
+
+func TestPrevUntilNodesRollback(t *testing.T) {
+ sel := Doc2().Find("#n5")
+ sel2 := Doc2().Find("#n2")
+ sel2 = sel.PrevUntilNodes(sel2.Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextFilteredUntil(t *testing.T) {
+ sel := Doc2().Find(".two").NextFilteredUntil(".even", ".six")
+ assertLength(t, sel.Nodes, 4)
+ assertSelectionIs(t, sel, "#n3", "#n5", "#nf3", "#nf5")
+}
+
+func TestNextFilteredUntilInvalid(t *testing.T) {
+ sel := Doc2().Find(".two").NextFilteredUntil("", "")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestNextFilteredUntilRollback(t *testing.T) {
+ sel := Doc2().Find(".two")
+ sel2 := sel.NextFilteredUntil(".even", ".six").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestNextFilteredUntilSelection(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".five")
+ sel = sel.NextFilteredUntilSelection(".even", sel2)
+ assertLength(t, sel.Nodes, 2)
+ assertSelectionIs(t, sel, "#n3", "#nf3")
+}
+
+func TestNextFilteredUntilSelectionRollback(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".five")
+ sel3 := sel.NextFilteredUntilSelection(".even", sel2).End()
+ assertEqual(t, sel, sel3)
+}
+
+func TestNextFilteredUntilNodes(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".four")
+ sel = sel.NextFilteredUntilNodes(".odd", sel2.Nodes...)
+ assertLength(t, sel.Nodes, 4)
+ assertSelectionIs(t, sel, "#n2", "#n6", "#nf2", "#nf6")
+}
+
+func TestNextFilteredUntilNodesRollback(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".four")
+ sel3 := sel.NextFilteredUntilNodes(".odd", sel2.Nodes...).End()
+ assertEqual(t, sel, sel3)
+}
+
+func TestPrevFilteredUntil(t *testing.T) {
+ sel := Doc2().Find(".five").PrevFilteredUntil(".odd", ".one")
+ assertLength(t, sel.Nodes, 4)
+ assertSelectionIs(t, sel, "#n4", "#n2", "#nf4", "#nf2")
+}
+
+func TestPrevFilteredUntilInvalid(t *testing.T) {
+ sel := Doc2().Find(".five").PrevFilteredUntil("", "")
+ assertLength(t, sel.Nodes, 0)
+}
+
+func TestPrevFilteredUntilRollback(t *testing.T) {
+ sel := Doc2().Find(".four")
+ sel2 := sel.PrevFilteredUntil(".odd", ".one").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestPrevFilteredUntilSelection(t *testing.T) {
+ sel := Doc2().Find(".odd")
+ sel2 := Doc2().Find(".two")
+ sel = sel.PrevFilteredUntilSelection(".odd", sel2)
+ assertLength(t, sel.Nodes, 2)
+ assertSelectionIs(t, sel, "#n4", "#nf4")
+}
+
+func TestPrevFilteredUntilSelectionRollback(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".five")
+ sel3 := sel.PrevFilteredUntilSelection(".even", sel2).End()
+ assertEqual(t, sel, sel3)
+}
+
+func TestPrevFilteredUntilNodes(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".four")
+ sel = sel.PrevFilteredUntilNodes(".odd", sel2.Nodes...)
+ assertLength(t, sel.Nodes, 2)
+ assertSelectionIs(t, sel, "#n2", "#nf2")
+}
+
+func TestPrevFilteredUntilNodesRollback(t *testing.T) {
+ sel := Doc2().Find(".even")
+ sel2 := Doc2().Find(".four")
+ sel3 := sel.PrevFilteredUntilNodes(".odd", sel2.Nodes...).End()
+ assertEqual(t, sel, sel3)
+}
+
+func TestClosestItself(t *testing.T) {
+ sel := Doc2().Find(".three")
+ sel2 := sel.Closest(".row")
+ assertLength(t, sel2.Nodes, sel.Length())
+ assertSelectionIs(t, sel2, "#n3", "#nf3")
+}
+
+func TestClosestNoDupes(t *testing.T) {
+ sel := Doc().Find(".span12")
+ sel2 := sel.Closest(".pvk-content")
+ assertLength(t, sel2.Nodes, 1)
+ assertClass(t, sel2, "pvk-content")
+}
+
+func TestClosestNone(t *testing.T) {
+ sel := Doc().Find("h4")
+ sel2 := sel.Closest("a")
+ assertLength(t, sel2.Nodes, 0)
+}
+
+func TestClosestInvalid(t *testing.T) {
+ sel := Doc().Find("h4")
+ sel2 := sel.Closest("")
+ assertLength(t, sel2.Nodes, 0)
+}
+
+func TestClosestMany(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.Closest(".pvk-content")
+ assertLength(t, sel2.Nodes, 2)
+ assertSelectionIs(t, sel2, "#pc1", "#pc2")
+}
+
+func TestClosestRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.Closest(".pvk-content").End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestClosestSelectionItself(t *testing.T) {
+ sel := Doc2().Find(".three")
+ sel2 := sel.ClosestSelection(Doc2().Find(".row"))
+ assertLength(t, sel2.Nodes, sel.Length())
+}
+
+func TestClosestSelectionNoDupes(t *testing.T) {
+ sel := Doc().Find(".span12")
+ sel2 := sel.ClosestSelection(Doc().Find(".pvk-content"))
+ assertLength(t, sel2.Nodes, 1)
+ assertClass(t, sel2, "pvk-content")
+}
+
+func TestClosestSelectionNone(t *testing.T) {
+ sel := Doc().Find("h4")
+ sel2 := sel.ClosestSelection(Doc().Find("a"))
+ assertLength(t, sel2.Nodes, 0)
+}
+
+func TestClosestSelectionMany(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ClosestSelection(Doc().Find(".pvk-content"))
+ assertLength(t, sel2.Nodes, 2)
+ assertSelectionIs(t, sel2, "#pc1", "#pc2")
+}
+
+func TestClosestSelectionRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ClosestSelection(Doc().Find(".pvk-content")).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestClosestNodesItself(t *testing.T) {
+ sel := Doc2().Find(".three")
+ sel2 := sel.ClosestNodes(Doc2().Find(".row").Nodes...)
+ assertLength(t, sel2.Nodes, sel.Length())
+}
+
+func TestClosestNodesNoDupes(t *testing.T) {
+ sel := Doc().Find(".span12")
+ sel2 := sel.ClosestNodes(Doc().Find(".pvk-content").Nodes...)
+ assertLength(t, sel2.Nodes, 1)
+ assertClass(t, sel2, "pvk-content")
+}
+
+func TestClosestNodesNone(t *testing.T) {
+ sel := Doc().Find("h4")
+ sel2 := sel.ClosestNodes(Doc().Find("a").Nodes...)
+ assertLength(t, sel2.Nodes, 0)
+}
+
+func TestClosestNodesMany(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ClosestNodes(Doc().Find(".pvk-content").Nodes...)
+ assertLength(t, sel2.Nodes, 2)
+ assertSelectionIs(t, sel2, "#pc1", "#pc2")
+}
+
+func TestClosestNodesRollback(t *testing.T) {
+ sel := Doc().Find(".container-fluid")
+ sel2 := sel.ClosestNodes(Doc().Find(".pvk-content").Nodes...).End()
+ assertEqual(t, sel, sel2)
+}
+
+func TestIssue26(t *testing.T) {
+ img1 := `<img src="assets/images/small/60163.jpg"/>`
+ img2 := `<img alt="Haha" src="assets/images/small/60163.jpg"/>`
+ cases := []struct {
+ s string
+ l int
+ }{
+ {s: img1 + img2, l: 2},
+ {s: img1, l: 1},
+ {s: img2, l: 1},
+ }
+ for _, c := range cases {
+ doc, err := NewDocumentFromReader(strings.NewReader(c.s))
+ if err != nil {
+ t.Fatal(err)
+ }
+ sel := doc.Find("img[src]")
+ assertLength(t, sel.Nodes, c.l)
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/type.go b/vendor/github.com/PuerkitoBio/goquery/type.go
new file mode 100644
index 0000000..6ad51db
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/type.go
@@ -0,0 +1,141 @@
+package goquery
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/andybalholm/cascadia"
+
+ "golang.org/x/net/html"
+)
+
+// Document represents an HTML document to be manipulated. Unlike jQuery, which
+// is loaded as part of a DOM document, and thus acts upon its containing
+// document, GoQuery doesn't know which HTML document to act upon. So it needs
+// to be told, and that's what the Document class is for. It holds the root
+// document node to manipulate, and can make selections on this document.
+type Document struct {
+ *Selection
+ Url *url.URL
+ rootNode *html.Node
+}
+
+// NewDocumentFromNode is a Document constructor that takes a root html Node
+// as argument.
+func NewDocumentFromNode(root *html.Node) *Document {
+ return newDocument(root, nil)
+}
+
+// NewDocument is a Document constructor that takes a string URL as argument.
+// It loads the specified document, parses it, and stores the root Document
+// node, ready to be manipulated.
+//
+// Deprecated: Use the net/http standard library package to make the request
+// and validate the response before calling goquery.NewDocumentFromReader
+// with the response's body.
+func NewDocument(url string) (*Document, error) {
+ // Load the URL
+ res, e := http.Get(url)
+ if e != nil {
+ return nil, e
+ }
+ return NewDocumentFromResponse(res)
+}
+
+// NewDocumentFromReader returns a Document from an io.Reader.
+// It returns an error as second value if the reader's data cannot be parsed
+// as html. It does not check if the reader is also an io.Closer, the
+// provided reader is never closed by this call. It is the responsibility
+// of the caller to close it if required.
+func NewDocumentFromReader(r io.Reader) (*Document, error) {
+ root, e := html.Parse(r)
+ if e != nil {
+ return nil, e
+ }
+ return newDocument(root, nil), nil
+}
+
+// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
+// It loads the specified response's document, parses it, and stores the root Document
+// node, ready to be manipulated. The response's body is closed on return.
+//
+// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
+func NewDocumentFromResponse(res *http.Response) (*Document, error) {
+ if res == nil {
+ return nil, errors.New("Response is nil")
+ }
+ defer res.Body.Close()
+ if res.Request == nil {
+ return nil, errors.New("Response.Request is nil")
+ }
+
+ // Parse the HTML into nodes
+ root, e := html.Parse(res.Body)
+ if e != nil {
+ return nil, e
+ }
+
+ // Create and fill the document
+ return newDocument(root, res.Request.URL), nil
+}
+
+// CloneDocument creates a deep-clone of a document.
+func CloneDocument(doc *Document) *Document {
+ return newDocument(cloneNode(doc.rootNode), doc.Url)
+}
+
+// Private constructor, make sure all fields are correctly filled.
+func newDocument(root *html.Node, url *url.URL) *Document {
+ // Create and fill the document
+ d := &Document{nil, url, root}
+ d.Selection = newSingleSelection(root, d)
+ return d
+}
+
+// Selection represents a collection of nodes matching some criteria. The
+// initial Selection can be created by using Document.Find, and then
+// manipulated using the jQuery-like chainable syntax and methods.
+type Selection struct {
+ Nodes []*html.Node
+ document *Document
+ prevSel *Selection
+}
+
+// Helper constructor to create an empty selection
+func newEmptySelection(doc *Document) *Selection {
+ return &Selection{nil, doc, nil}
+}
+
+// Helper constructor to create a selection of only one node
+func newSingleSelection(node *html.Node, doc *Document) *Selection {
+ return &Selection{[]*html.Node{node}, doc, nil}
+}
+
+// Matcher is an interface that defines the methods to match
+// HTML nodes against a compiled selector string. Cascadia's
+// Selector implements this interface.
+type Matcher interface {
+ Match(*html.Node) bool
+ MatchAll(*html.Node) []*html.Node
+ Filter([]*html.Node) []*html.Node
+}
+
+// compileMatcher compiles the selector string s and returns
+// the corresponding Matcher. If s is an invalid selector string,
+// it returns a Matcher that fails all matches.
+func compileMatcher(s string) Matcher {
+ cs, err := cascadia.Compile(s)
+ if err != nil {
+ return invalidMatcher{}
+ }
+ return cs
+}
+
+// invalidMatcher is a Matcher that always fails to match.
+type invalidMatcher struct{}
+
+func (invalidMatcher) Match(n *html.Node) bool { return false }
+func (invalidMatcher) MatchAll(n *html.Node) []*html.Node { return nil }
+func (invalidMatcher) Filter(ns []*html.Node) []*html.Node { return nil }
diff --git a/vendor/github.com/PuerkitoBio/goquery/type_test.go b/vendor/github.com/PuerkitoBio/goquery/type_test.go
new file mode 100644
index 0000000..1e82d5e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/type_test.go
@@ -0,0 +1,202 @@
+package goquery
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/html"
+)
+
+// Test helper functions and members
+var doc *Document
+var doc2 *Document
+var doc3 *Document
+var docB *Document
+var docW *Document
+
+func Doc() *Document {
+ if doc == nil {
+ doc = loadDoc("page.html")
+ }
+ return doc
+}
+
+func Doc2() *Document {
+ if doc2 == nil {
+ doc2 = loadDoc("page2.html")
+ }
+ return doc2
+}
+
+func Doc2Clone() *Document {
+ return CloneDocument(Doc2())
+}
+
+func Doc3() *Document {
+ if doc3 == nil {
+ doc3 = loadDoc("page3.html")
+ }
+ return doc3
+}
+
+func Doc3Clone() *Document {
+ return CloneDocument(Doc3())
+}
+
+func DocB() *Document {
+ if docB == nil {
+ docB = loadDoc("gotesting.html")
+ }
+ return docB
+}
+
+func DocW() *Document {
+ if docW == nil {
+ docW = loadDoc("gowiki.html")
+ }
+ return docW
+}
+
+func assertLength(t *testing.T, nodes []*html.Node, length int) {
+ if len(nodes) != length {
+ t.Errorf("Expected %d nodes, found %d.", length, len(nodes))
+ for i, n := range nodes {
+ t.Logf("Node %d: %+v.", i, n)
+ }
+ }
+}
+
+func assertClass(t *testing.T, sel *Selection, class string) {
+ if !sel.HasClass(class) {
+ t.Errorf("Expected node to have class %s, found %+v.", class, sel.Get(0))
+ }
+}
+
+func assertPanic(t *testing.T) {
+ if e := recover(); e == nil {
+ t.Error("Expected a panic.")
+ }
+}
+
+func assertEqual(t *testing.T, s1 *Selection, s2 *Selection) {
+ if s1 != s2 {
+ t.Error("Expected selection objects to be the same.")
+ }
+}
+
+func assertSelectionIs(t *testing.T, sel *Selection, is ...string) {
+ for i := 0; i < sel.Length(); i++ {
+ if !sel.Eq(i).Is(is[i]) {
+ t.Errorf("Expected node %d to be %s, found %+v", i, is[i], sel.Get(i))
+ }
+ }
+}
+
+func printSel(t *testing.T, sel *Selection) {
+ if testing.Verbose() {
+ h, err := sel.Html()
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Log(h)
+ }
+}
+
+func loadDoc(page string) *Document {
+ var f *os.File
+ var e error
+
+ if f, e = os.Open(fmt.Sprintf("./testdata/%s", page)); e != nil {
+ panic(e.Error())
+ }
+ defer f.Close()
+
+ var node *html.Node
+ if node, e = html.Parse(f); e != nil {
+ panic(e.Error())
+ }
+ return NewDocumentFromNode(node)
+}
+
+func TestNewDocument(t *testing.T) {
+ if f, e := os.Open("./testdata/page.html"); e != nil {
+ t.Error(e.Error())
+ } else {
+ defer f.Close()
+ if node, e := html.Parse(f); e != nil {
+ t.Error(e.Error())
+ } else {
+ doc = NewDocumentFromNode(node)
+ }
+ }
+}
+
+func TestNewDocumentFromReader(t *testing.T) {
+ cases := []struct {
+ src string
+ err bool
+ sel string
+ cnt int
+ }{
+ 0: {
+ src: `
+<html>
+<head>
+<title>Test</title>
+</head>
+<body>
+<h1>Hi</h1>
+</body>
+</html>`,
+ sel: "h1",
+ cnt: 1,
+ },
+ 1: {
+ // Actually pretty hard to make html.Parse return an error
+ // based on content...
+ src: `>>qq>`,
+ },
+ }
+ buf := bytes.NewBuffer(nil)
+
+ for i, c := range cases {
+ buf.Reset()
+ buf.WriteString(c.src)
+
+ d, e := NewDocumentFromReader(buf)
+ if (e != nil) != c.err {
+ if c.err {
+ t.Errorf("[%d] - expected error, got none", i)
+ } else {
+ t.Errorf("[%d] - expected no error, got %s", i, e)
+ }
+ }
+ if c.sel != "" {
+ s := d.Find(c.sel)
+ if s.Length() != c.cnt {
+ t.Errorf("[%d] - expected %d nodes, found %d", i, c.cnt, s.Length())
+ }
+ }
+ }
+}
+
+func TestNewDocumentFromResponseNil(t *testing.T) {
+ _, e := NewDocumentFromResponse(nil)
+ if e == nil {
+ t.Error("Expected error, got none")
+ }
+}
+
+func TestIssue103(t *testing.T) {
+ d, err := NewDocumentFromReader(strings.NewReader("Scientists Stored These Images in DNA—Then Flawlessly Retrieved Them"))
+ if err != nil {
+ t.Error(err)
+ }
+ text := d.Find("title").Text()
+ for i, r := range text {
+ t.Logf("%d: %d - %q\n", i, r, string(r))
+ }
+ t.Log(text)
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/utilities.go b/vendor/github.com/PuerkitoBio/goquery/utilities.go
new file mode 100644
index 0000000..b4c061a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/utilities.go
@@ -0,0 +1,161 @@
+package goquery
+
+import (
+ "bytes"
+
+ "golang.org/x/net/html"
+)
+
+// used to determine if a set (map[*html.Node]bool) should be used
+// instead of iterating over a slice. The set uses more memory and
+// is slower than slice iteration for small N.
+const minNodesForSet = 1000
+
+var nodeNames = []string{
+ html.ErrorNode: "#error",
+ html.TextNode: "#text",
+ html.DocumentNode: "#document",
+ html.CommentNode: "#comment",
+}
+
+// NodeName returns the node name of the first element in the selection.
+// It tries to behave in a similar way as the DOM's nodeName property
+// (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeName).
+//
+// Go's net/html package defines the following node types, listed with
+// the corresponding returned value from this function:
+//
+// ErrorNode : #error
+// TextNode : #text
+// DocumentNode : #document
+// ElementNode : the element's tag name
+// CommentNode : #comment
+// DoctypeNode : the name of the document type
+//
+func NodeName(s *Selection) string {
+ if s.Length() == 0 {
+ return ""
+ }
+ switch n := s.Get(0); n.Type {
+ case html.ElementNode, html.DoctypeNode:
+ return n.Data
+ default:
+ if n.Type >= 0 && int(n.Type) < len(nodeNames) {
+ return nodeNames[n.Type]
+ }
+ return ""
+ }
+}
+
+// OuterHtml returns the outer HTML rendering of the first item in
+// the selection - that is, the HTML including the first element's
+// tag and attributes.
+//
+// Unlike InnerHtml, this is a function and not a method on the Selection,
+// because this is not a jQuery method (in javascript-land, this is
+// a property provided by the DOM).
+func OuterHtml(s *Selection) (string, error) {
+ var buf bytes.Buffer
+
+ if s.Length() == 0 {
+ return "", nil
+ }
+ n := s.Get(0)
+ if err := html.Render(&buf, n); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// Loop through all container nodes to search for the target node.
+func sliceContains(container []*html.Node, contained *html.Node) bool {
+ for _, n := range container {
+ if nodeContains(n, contained) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Checks if the contained node is within the container node.
+func nodeContains(container *html.Node, contained *html.Node) bool {
+ // Check if the parent of the contained node is the container node, traversing
+ // upward until the top is reached, or the container is found.
+ for contained = contained.Parent; contained != nil; contained = contained.Parent {
+ if container == contained {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if the target node is in the slice of nodes.
+func isInSlice(slice []*html.Node, node *html.Node) bool {
+ return indexInSlice(slice, node) > -1
+}
+
+// Returns the index of the target node in the slice, or -1.
+func indexInSlice(slice []*html.Node, node *html.Node) int {
+ if node != nil {
+ for i, n := range slice {
+ if n == node {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// Appends the new nodes to the target slice, making sure no duplicate is added.
+// There is no check to the original state of the target slice, so it may still
+// contain duplicates. The target slice is returned because append() may create
+// a new underlying array. If targetSet is nil, a local set is created with the
+// target if len(target) + len(nodes) is greater than minNodesForSet.
+func appendWithoutDuplicates(target []*html.Node, nodes []*html.Node, targetSet map[*html.Node]bool) []*html.Node {
+ // if there are not that many nodes, don't use the map, faster to just use nested loops
+ // (unless a non-nil targetSet is passed, in which case the caller knows better).
+ if targetSet == nil && len(target)+len(nodes) < minNodesForSet {
+ for _, n := range nodes {
+ if !isInSlice(target, n) {
+ target = append(target, n)
+ }
+ }
+ return target
+ }
+
+ // if a targetSet is passed, then assume it is reliable, otherwise create one
+ // and initialize it with the current target contents.
+ if targetSet == nil {
+ targetSet = make(map[*html.Node]bool, len(target))
+ for _, n := range target {
+ targetSet[n] = true
+ }
+ }
+ for _, n := range nodes {
+ if !targetSet[n] {
+ target = append(target, n)
+ targetSet[n] = true
+ }
+ }
+
+ return target
+}
+
+// Loop through a selection, returning only those nodes that pass the predicate
+// function.
+func grep(sel *Selection, predicate func(i int, s *Selection) bool) (result []*html.Node) {
+ for i, n := range sel.Nodes {
+ if predicate(i, newSingleSelection(n, sel.document)) {
+ result = append(result, n)
+ }
+ }
+ return result
+}
+
+// Creates a new Selection object based on the specified nodes, and keeps the
+// source Selection object on the stack (linked list).
+func pushStack(fromSel *Selection, nodes []*html.Node) *Selection {
+ result := &Selection{nodes, fromSel.document, fromSel}
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/utilities_test.go b/vendor/github.com/PuerkitoBio/goquery/utilities_test.go
new file mode 100644
index 0000000..c8e9d54
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/utilities_test.go
@@ -0,0 +1,128 @@
+package goquery
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/html"
+)
+
+var allNodes = `
+
+
+
+
+
+
`,
+ `[href#=(^https:\/\/[^\/]*\/?news)]`,
+ []string{
+ ``,
+ },
+ },
+ {
+ ``,
+ `:input`,
+ []string{
+ ``,
+ ``,
+ ``,
+ ``,
+ ``,
+ },
+ },
+ {
+ ``,
+ ":root",
+ []string{
+ "",
+ },
+ },
+ {
+ ``,
+ "*:root",
+ []string{
+ "",
+ },
+ },
+ {
+ ``,
+ "*:root:first-child",
+ []string{},
+ },
+ {
+ ``,
+ "*:root:nth-child(1)",
+ []string{},
+ },
+ {
+ ``,
+ "a:not(:root)",
+ []string{
+ ``,
+ },
+ },
+}
+
+func TestSelectors(t *testing.T) {
+ for _, test := range selectorTests {
+ s, err := Compile(test.selector)
+ if err != nil {
+ t.Errorf("error compiling %q: %s", test.selector, err)
+ continue
+ }
+
+ doc, err := html.Parse(strings.NewReader(test.HTML))
+ if err != nil {
+ t.Errorf("error parsing %q: %s", test.HTML, err)
+ continue
+ }
+
+ matches := s.MatchAll(doc)
+ if len(matches) != len(test.results) {
+ t.Errorf("selector %s wanted %d elements, got %d instead", test.selector, len(test.results), len(matches))
+ continue
+ }
+
+ for i, m := range matches {
+ got := nodeString(m)
+ if got != test.results[i] {
+ t.Errorf("selector %s wanted %s, got %s instead", test.selector, test.results[i], got)
+ }
+ }
+
+ firstMatch := s.MatchFirst(doc)
+ if len(test.results) == 0 {
+ if firstMatch != nil {
+ t.Errorf("MatchFirst: selector %s want nil, got %s", test.selector, nodeString(firstMatch))
+ }
+ } else {
+ got := nodeString(firstMatch)
+ if got != test.results[0] {
+ t.Errorf("MatchFirst: selector %s want %s, got %s", test.selector, test.results[0], got)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antchfx/htmlquery/.gitignore b/vendor/github.com/antchfx/htmlquery/.gitignore
new file mode 100644
index 0000000..4d5d27b
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/.gitignore
@@ -0,0 +1,32 @@
+# vscode
+.vscode
+debug
+*.test
+
+./build
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/htmlquery/.travis.yml b/vendor/github.com/antchfx/htmlquery/.travis.yml
new file mode 100644
index 0000000..1f72256
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+
+install:
+ - go get golang.org/x/net/html/charset
+ - go get golang.org/x/net/html
+ - go get github.com/antchfx/xpath
+ - go get github.com/mattn/goveralls
+
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/htmlquery/LICENSE b/vendor/github.com/antchfx/htmlquery/LICENSE
new file mode 100644
index 0000000..e14c371
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/htmlquery/README.md b/vendor/github.com/antchfx/htmlquery/README.md
new file mode 100644
index 0000000..0f466cb
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/README.md
@@ -0,0 +1,102 @@
+htmlquery
+====
+[![Build Status](https://travis-ci.org/antchfx/htmlquery.svg?branch=master)](https://travis-ci.org/antchfx/htmlquery)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/htmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/htmlquery?branch=master)
+[![GoDoc](https://godoc.org/github.com/antchfx/htmlquery?status.svg)](https://godoc.org/github.com/antchfx/htmlquery)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/htmlquery)](https://goreportcard.com/report/github.com/antchfx/htmlquery)
+
+Overview
+====
+
+htmlquery is an XPath query package for HTML that lets you extract data from, or evaluate expressions against, HTML documents using XPath expressions.
+
+Changelogs
+===
+
+2019-02-04
+- [#7](https://github.com/antchfx/htmlquery/issues/7) Removed deprecated `FindEach()` and `FindEachWithBreak()` methods.
+
+2018-12-28
+- Avoid adding duplicate elements to list for `Find()` method. [#6](https://github.com/antchfx/htmlquery/issues/6)
+
+Installation
+====
+
+> $ go get github.com/antchfx/htmlquery
+
+Getting Started
+====
+
+#### Load HTML document from URL.
+
+```go
+doc, err := htmlquery.LoadURL("http://example.com/")
+```
+
+#### Load HTML document from string.
+
+```go
+s := `....`
+doc, err := htmlquery.Parse(strings.NewReader(s))
+```
+
+#### Find all A elements.
+
+```go
+list := htmlquery.Find(doc, "//a")
+```
+
+#### Find all A elements that have `href` attribute.
+
+```go
+list := htmlquery.Find(doc, "//a[@href]")
+```
+
+#### Find all A elements and get only their `href` attribute.
+
+```go
+list := htmlquery.Find(doc, "//a/@href")
+```
+
+### Find the third A element.
+
+```go
+a := htmlquery.FindOne(doc, "//a[3]")
+```
+
+#### Evaluate the number of all IMG element.
+
+```go
+expr, _ := xpath.Compile("count(//img)")
+v := expr.Evaluate(htmlquery.CreateXPathNavigator(doc)).(float64)
+fmt.Printf("total count is %f", v)
+```
+
+Quick Tutorial
+===
+
+```go
+func main() {
+ doc, err := htmlquery.LoadURL("https://www.bing.com/search?q=golang")
+ if err != nil {
+ panic(err)
+ }
+ // Find all news items.
+ for i, n := range htmlquery.Find(doc, "//ol/li") {
+ a := htmlquery.FindOne(n, "//a")
+ fmt.Printf("%d %s(%s)\n", i, htmlquery.InnerText(a), htmlquery.SelectAttr(a, "href"))
+ }
+}
+```
+
+List of supported XPath query packages
+===
+|Name |Description |
+|--------------------------|----------------|
+|[htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for the HTML document|
+|[xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for the XML document|
+|[jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for the JSON document|
+
+Questions
+===
+Please let me know if you have any questions.
diff --git a/vendor/github.com/antchfx/htmlquery/query.go b/vendor/github.com/antchfx/htmlquery/query.go
new file mode 100644
index 0000000..37d30b9
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/query.go
@@ -0,0 +1,291 @@
+/*
+Package htmlquery provides functions for extracting data from HTML documents using XPath expressions.
+*/
+package htmlquery
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/antchfx/xpath"
+ "golang.org/x/net/html"
+ "golang.org/x/net/html/charset"
+)
+
+var _ xpath.NodeNavigator = &NodeNavigator{}
+
+// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node.
+func CreateXPathNavigator(top *html.Node) *NodeNavigator {
+ return &NodeNavigator{curr: top, root: top, attr: -1}
+}
+
+// Find searches the html.Node that matches by the specified XPath expr.
+func Find(top *html.Node, expr string) []*html.Node {
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ var elems []*html.Node
+ t := exp.Select(CreateXPathNavigator(top))
+ for t.MoveNext() {
+ nav := t.Current().(*NodeNavigator)
+ n := getCurrentNode(nav)
+ // avoid adding duplicate nodes.
+ if len(elems) > 0 && (elems[0] == n || (nav.NodeType() == xpath.AttributeNode &&
+ nav.LocalName() == elems[0].Data && nav.Value() == InnerText(elems[0]))) {
+ continue
+ }
+ elems = append(elems, n)
+ }
+ return elems
+}
+
+// FindOne searches the html.Node that matches by the specified XPath expr,
+// and returns first element of matched html.Node.
+func FindOne(top *html.Node, expr string) *html.Node {
+ var elem *html.Node
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ t := exp.Select(CreateXPathNavigator(top))
+ if t.MoveNext() {
+ elem = getCurrentNode(t.Current().(*NodeNavigator))
+ }
+ return elem
+}
+
+// LoadURL loads the HTML document from the specified URL.
+func LoadURL(url string) (*html.Node, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, err
+ }
+ return html.Parse(r)
+}
+
+func getCurrentNode(n *NodeNavigator) *html.Node {
+ if n.NodeType() == xpath.AttributeNode {
+ childNode := &html.Node{
+ Type: html.TextNode,
+ Data: n.Value(),
+ }
+ return &html.Node{
+ Type: html.ElementNode,
+ Data: n.LocalName(),
+ FirstChild: childNode,
+ LastChild: childNode,
+ }
+
+ }
+ return n.curr
+}
+
+// Parse returns the parse tree for the HTML from the given Reader.
+func Parse(r io.Reader) (*html.Node, error) {
+ return html.Parse(r)
+}
+
+// InnerText returns the text between the start and end tags of the object.
+func InnerText(n *html.Node) string {
+ var output func(*bytes.Buffer, *html.Node)
+ output = func(buf *bytes.Buffer, n *html.Node) {
+ switch n.Type {
+ case html.TextNode:
+ buf.WriteString(n.Data)
+ return
+ case html.CommentNode:
+ return
+ }
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ output(buf, child)
+ }
+ }
+
+ var buf bytes.Buffer
+ output(&buf, n)
+ return buf.String()
+}
+
+// SelectAttr returns the attribute value with the specified name.
+func SelectAttr(n *html.Node, name string) (val string) {
+ if n == nil {
+ return
+ }
+ if n.Type == html.ElementNode && n.Parent == nil && name == n.Data {
+ return InnerText(n)
+ }
+ for _, attr := range n.Attr {
+ if attr.Key == name {
+ val = attr.Val
+ break
+ }
+ }
+ return
+}
+
+// OutputHTML returns the text including tags name.
+func OutputHTML(n *html.Node, self bool) string {
+ var buf bytes.Buffer
+ if self {
+ html.Render(&buf, n)
+ } else {
+ for n := n.FirstChild; n != nil; n = n.NextSibling {
+ html.Render(&buf, n)
+ }
+ }
+ return buf.String()
+}
+
+type NodeNavigator struct {
+ root, curr *html.Node
+ attr int
+}
+
+func (h *NodeNavigator) Current() *html.Node {
+ return h.curr
+}
+
+func (h *NodeNavigator) NodeType() xpath.NodeType {
+ switch h.curr.Type {
+ case html.CommentNode:
+ return xpath.CommentNode
+ case html.TextNode:
+ return xpath.TextNode
+ case html.DocumentNode:
+ return xpath.RootNode
+ case html.ElementNode:
+ if h.attr != -1 {
+ return xpath.AttributeNode
+ }
+ return xpath.ElementNode
+ case html.DoctypeNode:
+ // DOCTYPE declarations are ignored and treated as the root-node type.
+ return xpath.RootNode
+ }
+ panic(fmt.Sprintf("unknown HTML node type: %v", h.curr.Type))
+}
+
+func (h *NodeNavigator) LocalName() string {
+ if h.attr != -1 {
+ return h.curr.Attr[h.attr].Key
+ }
+ return h.curr.Data
+}
+
+func (*NodeNavigator) Prefix() string {
+ return ""
+}
+
+func (h *NodeNavigator) Value() string {
+ switch h.curr.Type {
+ case html.CommentNode:
+ return h.curr.Data
+ case html.ElementNode:
+ if h.attr != -1 {
+ return h.curr.Attr[h.attr].Val
+ }
+ return InnerText(h.curr)
+ case html.TextNode:
+ return h.curr.Data
+ }
+ return ""
+}
+
+func (h *NodeNavigator) Copy() xpath.NodeNavigator {
+ n := *h
+ return &n
+}
+
+func (h *NodeNavigator) MoveToRoot() {
+ h.curr = h.root
+}
+
+func (h *NodeNavigator) MoveToParent() bool {
+ if h.attr != -1 {
+ h.attr = -1
+ return true
+ } else if node := h.curr.Parent; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveToNextAttribute() bool {
+ if h.attr >= len(h.curr.Attr)-1 {
+ return false
+ }
+ h.attr++
+ return true
+}
+
+func (h *NodeNavigator) MoveToChild() bool {
+ if h.attr != -1 {
+ return false
+ }
+ if node := h.curr.FirstChild; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveToFirst() bool {
+ if h.attr != -1 || h.curr.PrevSibling == nil {
+ return false
+ }
+ for {
+ node := h.curr.PrevSibling
+ if node == nil {
+ break
+ }
+ h.curr = node
+ }
+ return true
+}
+
+func (h *NodeNavigator) String() string {
+ return h.Value()
+}
+
+func (h *NodeNavigator) MoveToNext() bool {
+ if h.attr != -1 {
+ return false
+ }
+ if node := h.curr.NextSibling; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveToPrevious() bool {
+ if h.attr != -1 {
+ return false
+ }
+ if node := h.curr.PrevSibling; node != nil {
+ h.curr = node
+ return true
+ }
+ return false
+}
+
+func (h *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
+ node, ok := other.(*NodeNavigator)
+ if !ok || node.root != h.root {
+ return false
+ }
+
+ h.curr = node.curr
+ h.attr = node.attr
+ return true
+}
diff --git a/vendor/github.com/antchfx/htmlquery/query_test.go b/vendor/github.com/antchfx/htmlquery/query_test.go
new file mode 100644
index 0000000..ce79f35
--- /dev/null
+++ b/vendor/github.com/antchfx/htmlquery/query_test.go
@@ -0,0 +1,124 @@
+package htmlquery
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/antchfx/xpath"
+ "golang.org/x/net/html"
+)
+
+const htmlSample = `
+
+Hello,World!
+
+
+
+
+
+
City Gallery
+
+
+
+
London
+
+
London is the capital city of England. It is the most populous city in the United Kingdom, with a metropolitan area of over 13 million inhabitants.
+
Standing on the River Thames, London has been a major settlement for two millennia, its history going back to its founding by the Romans, who named it Londinium.
+
+
+
+
+
+`
+
+var testDoc = loadHTML(htmlSample)
+
+func TestLoadURL(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, htmlSample)
+ }))
+ defer ts.Close()
+
+ _, err := LoadURL(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestNavigator(t *testing.T) {
+ top := FindOne(testDoc, "//html")
+ nav := &NodeNavigator{curr: top, root: top, attr: -1}
+ nav.MoveToChild() // HEAD
+ nav.MoveToNext()
+ if nav.NodeType() != xpath.TextNode {
+ t.Fatalf("expectd node type is TextNode,but got %vs", nav.NodeType())
+ }
+ nav.MoveToNext() //
+ if nav.Value() != InnerText(FindOne(testDoc, "//body")) {
+ t.Fatal("body not equal")
+ }
+ nav.MoveToPrevious() //
+ nav.MoveToParent() //
+ if nav.curr != top {
+ t.Fatal("current node is not html node")
+ }
+ nav.MoveToNextAttribute()
+ if nav.LocalName() != "lang" {
+ t.Fatal("node not move to lang attribute")
+ }
+
+ nav.MoveToParent()
+ nav.MoveToFirst() //
+ if nav.curr.Type != html.DoctypeNode {
+ t.Fatalf("expected node type is DoctypeNode,but got %d", nav.curr.Type)
+ }
+}
+
+func TestXPath(t *testing.T) {
+ node := FindOne(testDoc, "//html")
+ if SelectAttr(node, "lang") != "en-US" {
+ t.Fatal("//html[@lang] != en-Us")
+ }
+
+ node = FindOne(testDoc, "//header")
+ if strings.Index(InnerText(node), "Logo") > 0 {
+ t.Fatal("InnerText() have comment node text")
+ }
+ if strings.Index(OutputHTML(node, true), "Logo") == -1 {
+ t.Fatal("OutputHTML() shoud have comment node text")
+ }
+ link := FindOne(testDoc, "//a[1]/@href")
+ if link == nil {
+ t.Fatal("link is nil")
+ }
+ if v := InnerText(link); v != "/London" {
+ t.Fatalf("expect value is /London, but got %s", v)
+ }
+
+}
+
+func TestXPathCdUp(t *testing.T) {
+ doc := loadHTML(``)
+ node := FindOne(doc, "//b/@attr/..")
+ t.Logf("node = %#v", node)
+ if node == nil || node.Data != "b" {
+ t.Fatal("//b/@id/.. != ")
+ }
+}
+
+func loadHTML(str string) *html.Node {
+ node, err := Parse(strings.NewReader(str))
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
diff --git a/vendor/github.com/antchfx/xmlquery/.gitignore b/vendor/github.com/antchfx/xmlquery/.gitignore
new file mode 100644
index 0000000..4d5d27b
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/.gitignore
@@ -0,0 +1,32 @@
+# vscode
+.vscode
+debug
+*.test
+
+./build
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/.travis.yml b/vendor/github.com/antchfx/xmlquery/.travis.yml
new file mode 100644
index 0000000..d9a7bb8
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+
+install:
+ - go get golang.org/x/net/html/charset
+ - go get github.com/antchfx/xpath
+ - go get github.com/mattn/goveralls
+
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/LICENSE b/vendor/github.com/antchfx/xmlquery/LICENSE
new file mode 100644
index 0000000..e14c371
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/README.md b/vendor/github.com/antchfx/xmlquery/README.md
new file mode 100644
index 0000000..6683afd
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/README.md
@@ -0,0 +1,186 @@
+xmlquery
+====
+[![Build Status](https://travis-ci.org/antchfx/xmlquery.svg?branch=master)](https://travis-ci.org/antchfx/xmlquery)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/xmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xmlquery?branch=master)
+[![GoDoc](https://godoc.org/github.com/antchfx/xmlquery?status.svg)](https://godoc.org/github.com/antchfx/xmlquery)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xmlquery)](https://goreportcard.com/report/github.com/antchfx/xmlquery)
+
+Overview
+===
+
+xmlquery is an XPath query package for XML that lets you extract data from, or evaluate expressions against, XML documents using XPath expressions.
+
+Change Logs
+===
+
+**2018-12-23**
+* XML output now includes comment nodes. [#9](https://github.com/antchfx/xmlquery/issues/9)
+
+**2018-12-03**
+ * added support for attribute names with namespace prefixes in XML output. [#6](https://github.com/antchfx/xmlquery/issues/6)
+
+Installation
+====
+
+> $ go get github.com/antchfx/xmlquery
+
+Getting Started
+===
+
+#### Parse an XML document from a URL.
+
+```go
+doc, err := xmlquery.LoadURL("http://www.example.com/sitemap.xml")
+```
+
+#### Parse an XML document from a string.
+
+```go
+s := ``
+doc, err := xmlquery.Parse(strings.NewReader(s))
+```
+
+#### Parse an XML document from an io.Reader.
+
+```go
+f, err := os.Open("../books.xml")
+doc, err := xmlquery.Parse(f)
+```
+
+#### Find authors of all books in the bookstore.
+
+```go
+list := xmlquery.Find(doc, "//book//author")
+// or
+list := xmlquery.Find(doc, "//author")
+```
+
+#### Find the second book.
+
+```go
+book := xmlquery.FindOne(doc, "//book[2]")
+```
+
+#### Find all book elements and get only their `id` attribute. (New Feature)
+
+```go
+list := xmlquery.Find(doc,"//book/@id")
+```
+
+#### Find all books with id is bk104.
+
+```go
+list := xmlquery.Find(doc, "//book[@id='bk104']")
+```
+
+#### Find all books that price less than 5.
+
+```go
+list := xmlquery.Find(doc, "//book[price<5]")
+```
+
+#### Evaluate the total price of all books.
+
+```go
+expr, err := xpath.Compile("sum(//book/price)")
+price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+fmt.Printf("total price: %f\n", price)
+```
+
+#### Evaluate the number of all books element.
+
+```go
+expr, err := xpath.Compile("count(//book)")
+price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+```
+
+#### Create XML document.
+
+```go
+doc := &xmlquery.Node{
+ Type: xmlquery.DeclarationNode,
+ Data: "xml",
+ Attr: []xml.Attr{
+ xml.Attr{Name: xml.Name{Local: "version"}, Value: "1.0"},
+ },
+}
+root := &xmlquery.Node{
+ Data: "rss",
+ Type: xmlquery.ElementNode,
+}
+doc.FirstChild = root
+channel := &xmlquery.Node{
+ Data: "channel",
+ Type: xmlquery.ElementNode,
+}
+root.FirstChild = channel
+title := &xmlquery.Node{
+ Data: "title",
+ Type: xmlquery.ElementNode,
+}
+title_text := &xmlquery.Node{
+ Data: "W3Schools Home Page",
+ Type: xmlquery.TextNode,
+}
+title.FirstChild = title_text
+channel.FirstChild = title
+fmt.Println(doc.OutputXML(true))
+// W3Schools Home Page
+```
+
+Quick Tutorial
+===
+
+```go
+import (
+ "github.com/antchfx/xmlquery"
+)
+
+func main(){
+ s := `
+
+
+ W3Schools Home Page
+ https://www.w3schools.com
+ Free web building tutorials
+
+ RSS Tutorial
+ https://www.w3schools.com/xml/xml_rss.asp
+ New RSS tutorial on W3Schools
+
+
+ XML Tutorial
+ https://www.w3schools.com/xml
+ New XML tutorial on W3Schools
+
+
+`
+
+ doc, err := xmlquery.Parse(strings.NewReader(s))
+ if err != nil {
+ panic(err)
+ }
+ channel := xmlquery.FindOne(doc, "//channel")
+ if n := channel.SelectElement("title"); n != nil {
+ fmt.Printf("title: %s\n", n.InnerText())
+ }
+ if n := channel.SelectElement("link"); n != nil {
+ fmt.Printf("link: %s\n", n.InnerText())
+ }
+ for i, n := range xmlquery.Find(doc, "//item/title") {
+ fmt.Printf("#%d %s\n", i, n.InnerText())
+ }
+}
+```
+
+List of supported XPath query packages
+===
+|Name |Description |
+|--------------------------|----------------|
+|[htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for the HTML document|
+|[xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for the XML document|
+|[jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for the JSON document|
+
+Questions
+===
+Please let me know if you have any questions.
diff --git a/vendor/github.com/antchfx/xmlquery/books.xml b/vendor/github.com/antchfx/xmlquery/books.xml
new file mode 100644
index 0000000..85a74b5
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/books.xml
@@ -0,0 +1,121 @@
+
+
+
+
+ Gambardella, Matthew
+ XML Developer's Guide
+ Computer
+ 44.95
+ 2000-10-01
+ An in-depth look at creating applications
+ with XML.
+
+
+ Ralls, Kim
+ Midnight Rain
+ Fantasy
+ 5.95
+ 2000-12-16
+ A former architect battles corporate zombies,
+ an evil sorceress, and her own childhood to become queen
+ of the world.
+
+
+ Corets, Eva
+ Maeve Ascendant
+ Fantasy
+ 5.95
+ 2000-11-17
+ After the collapse of a nanotechnology
+ society in England, the young survivors lay the
+ foundation for a new society.
+
+
+ Corets, Eva
+ Oberon's Legacy
+ Fantasy
+ 5.95
+ 2001-03-10
+ In post-apocalypse England, the mysterious
+ agent known only as Oberon helps to create a new life
+ for the inhabitants of London. Sequel to Maeve
+ Ascendant.
+
+
+ Corets, Eva
+ The Sundered Grail
+ Fantasy
+ 5.95
+ 2001-09-10
+ The two daughters of Maeve, half-sisters,
+ battle one another for control of England. Sequel to
+ Oberon's Legacy.
+
+
+ Randall, Cynthia
+ Lover Birds
+ Romance
+ 4.95
+ 2000-09-02
+ When Carla meets Paul at an ornithology
+ conference, tempers fly as feathers get ruffled.
+
+
+ Thurman, Paula
+ Splish Splash
+ Romance
+ 4.95
+ 2000-11-02
+ A deep sea diver finds true love twenty
+ thousand leagues beneath the sea.
+
+
+ Knorr, Stefan
+ Creepy Crawlies
+ Horror
+ 4.95
+ 2000-12-06
+ An anthology of horror stories about roaches,
+ centipedes, scorpions and other insects.
+
+
+ Kress, Peter
+ Paradox Lost
+ Science Fiction
+ 6.95
+ 2000-11-02
+ After an inadvertant trip through a Heisenberg
+ Uncertainty Device, James Salway discovers the problems
+ of being quantum.
+
+
+ O'Brien, Tim
+ Microsoft .NET: The Programming Bible
+ Computer
+ 36.95
+ 2000-12-09
+ Microsoft's .NET initiative is explored in
+ detail in this deep programmer's reference.
+
+
+ O'Brien, Tim
+ MSXML3: A Comprehensive Guide
+ Computer
+ 36.95
+ 2000-12-01
+ The Microsoft MSXML3 parser is covered in
+ detail, with attention to XML DOM interfaces, XSLT processing,
+ SAX and more.
+
+
+ Galos, Mike
+ Visual Studio 7: A Comprehensive Guide
+ Computer
+ 49.95
+ 2001-04-16
+ Microsoft Visual Studio 7 is explored in depth,
+ looking at how Visual Basic, Visual C++, C#, and ASP+ are
+ integrated into a comprehensive development
+ environment.
+
+
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/node.go b/vendor/github.com/antchfx/xmlquery/node.go
new file mode 100644
index 0000000..d0e6a54
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/node.go
@@ -0,0 +1,302 @@
+package xmlquery
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "golang.org/x/net/html/charset"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint
+
+const (
+ // DocumentNode is a document object that, as the root of the document tree,
+ // provides access to the entire XML document.
+ DocumentNode NodeType = iota
+ // DeclarationNode is the document type declaration, indicated by the following
+ // tag (for example, ).
+ DeclarationNode
+ // ElementNode is an element (for example, ).
+ ElementNode
+ // TextNode is the text content of a node.
+ TextNode
+	// CommentNode is a comment (for example, ).
+ CommentNode
+ // AttributeNode is an attribute of element.
+ AttributeNode
+)
+
+// A Node consists of a NodeType and some Data (tag name for
+// element nodes, content for text) and are part of a tree of Nodes.
+type Node struct {
+ Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+ Type NodeType
+ Data string
+ Prefix string
+ NamespaceURI string
+ Attr []xml.Attr
+
+ level int // node level in the tree
+}
+
+// InnerText returns the text between the start and end tags of the object.
+func (n *Node) InnerText() string {
+ var output func(*bytes.Buffer, *Node)
+ output = func(buf *bytes.Buffer, n *Node) {
+ switch n.Type {
+ case TextNode:
+ buf.WriteString(n.Data)
+ return
+ case CommentNode:
+ return
+ }
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ output(buf, child)
+ }
+ }
+
+ var buf bytes.Buffer
+ output(&buf, n)
+ return buf.String()
+}
+
+func outputXML(buf *bytes.Buffer, n *Node) {
+ if n.Type == TextNode {
+ xml.EscapeText(buf, []byte(strings.TrimSpace(n.Data)))
+ return
+ }
+ if n.Type == CommentNode {
+ buf.WriteString("")
+ return
+ }
+ if n.Type == DeclarationNode {
+ buf.WriteString("" + n.Data)
+ } else {
+ if n.Prefix == "" {
+ buf.WriteString("<" + n.Data)
+ } else {
+ buf.WriteString("<" + n.Prefix + ":" + n.Data)
+ }
+ }
+
+ for _, attr := range n.Attr {
+ if attr.Name.Space != "" {
+ buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
+ } else {
+ buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
+ }
+ }
+ if n.Type == DeclarationNode {
+ buf.WriteString("?>")
+ } else {
+ buf.WriteString(">")
+ }
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ outputXML(buf, child)
+ }
+ if n.Type != DeclarationNode {
+ if n.Prefix == "" {
+ buf.WriteString(fmt.Sprintf("%s>", n.Data))
+ } else {
+ buf.WriteString(fmt.Sprintf("%s:%s>", n.Prefix, n.Data))
+ }
+ }
+}
+
+// OutputXML returns the XML text of the node, including element tags.
+func (n *Node) OutputXML(self bool) string {
+ var buf bytes.Buffer
+ if self {
+ outputXML(&buf, n)
+ } else {
+ for n := n.FirstChild; n != nil; n = n.NextSibling {
+ outputXML(&buf, n)
+ }
+ }
+
+ return buf.String()
+}
+
+func addAttr(n *Node, key, val string) {
+ var attr xml.Attr
+ if i := strings.Index(key, ":"); i > 0 {
+ attr = xml.Attr{
+ Name: xml.Name{Space: key[:i], Local: key[i+1:]},
+ Value: val,
+ }
+ } else {
+ attr = xml.Attr{
+ Name: xml.Name{Local: key},
+ Value: val,
+ }
+ }
+
+ n.Attr = append(n.Attr, attr)
+}
+
+func addChild(parent, n *Node) {
+ n.Parent = parent
+ if parent.FirstChild == nil {
+ parent.FirstChild = n
+ } else {
+ parent.LastChild.NextSibling = n
+ n.PrevSibling = parent.LastChild
+ }
+
+ parent.LastChild = n
+}
+
+func addSibling(sibling, n *Node) {
+ for t := sibling.NextSibling; t != nil; t = t.NextSibling {
+ sibling = t
+ }
+ n.Parent = sibling.Parent
+ sibling.NextSibling = n
+ n.PrevSibling = sibling
+ if sibling.Parent != nil {
+ sibling.Parent.LastChild = n
+ }
+}
+
+// LoadURL loads the XML document from the specified URL.
+func LoadURL(url string) (*Node, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ return parse(resp.Body)
+}
+
+func parse(r io.Reader) (*Node, error) {
+ var (
+ decoder = xml.NewDecoder(r)
+ doc = &Node{Type: DocumentNode}
+ space2prefix = make(map[string]string)
+ level = 0
+ )
+ // http://www.w3.org/XML/1998/namespace is bound by definition to the prefix xml.
+ space2prefix["http://www.w3.org/XML/1998/namespace"] = "xml"
+ decoder.CharsetReader = charset.NewReaderLabel
+ prev := doc
+ for {
+ tok, err := decoder.Token()
+ switch {
+ case err == io.EOF:
+ goto quit
+ case err != nil:
+ return nil, err
+ }
+
+ switch tok := tok.(type) {
+ case xml.StartElement:
+ if level == 0 {
+			// missing XML declaration
+ node := &Node{Type: DeclarationNode, Data: "xml", level: 1}
+ addChild(prev, node)
+ level = 1
+ prev = node
+ }
+ // https://www.w3.org/TR/xml-names/#scoping-defaulting
+ for _, att := range tok.Attr {
+ if att.Name.Local == "xmlns" {
+ space2prefix[att.Value] = ""
+ } else if att.Name.Space == "xmlns" {
+ space2prefix[att.Value] = att.Name.Local
+ }
+ }
+
+ if tok.Name.Space != "" {
+ if _, found := space2prefix[tok.Name.Space]; !found {
+ return nil, errors.New("xmlquery: invalid XML document, namespace is missing")
+ }
+ }
+
+ for i := 0; i < len(tok.Attr); i++ {
+ att := &tok.Attr[i]
+ if prefix, ok := space2prefix[att.Name.Space]; ok {
+ att.Name.Space = prefix
+ }
+ }
+
+ node := &Node{
+ Type: ElementNode,
+ Data: tok.Name.Local,
+ Prefix: space2prefix[tok.Name.Space],
+ NamespaceURI: tok.Name.Space,
+ Attr: tok.Attr,
+ level: level,
+ }
+ //fmt.Println(fmt.Sprintf("start > %s : %d", node.Data, level))
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ } else if level < prev.level {
+ for i := prev.level - level; i > 1; i-- {
+ prev = prev.Parent
+ }
+ addSibling(prev.Parent, node)
+ }
+ prev = node
+ level++
+ case xml.EndElement:
+ level--
+ case xml.CharData:
+ node := &Node{Type: TextNode, Data: string(tok), level: level}
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ }
+ case xml.Comment:
+ node := &Node{Type: CommentNode, Data: string(tok), level: level}
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ } else if level < prev.level {
+ for i := prev.level - level; i > 1; i-- {
+ prev = prev.Parent
+ }
+ addSibling(prev.Parent, node)
+ }
+ case xml.ProcInst: // Processing Instruction
+ if prev.Type != DeclarationNode {
+ level++
+ }
+ node := &Node{Type: DeclarationNode, Data: tok.Target, level: level}
+ pairs := strings.Split(string(tok.Inst), " ")
+ for _, pair := range pairs {
+ pair = strings.TrimSpace(pair)
+ if i := strings.Index(pair, "="); i > 0 {
+ addAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
+ }
+ }
+ if level == prev.level {
+ addSibling(prev, node)
+ } else if level > prev.level {
+ addChild(prev, node)
+ }
+ prev = node
+ case xml.Directive:
+ }
+
+ }
+quit:
+ return doc, nil
+}
+
+// Parse returns the parse tree for the XML from the given Reader.
+func Parse(r io.Reader) (*Node, error) {
+ return parse(r)
+}
diff --git a/vendor/github.com/antchfx/xmlquery/node_test.go b/vendor/github.com/antchfx/xmlquery/node_test.go
new file mode 100644
index 0000000..1936958
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/node_test.go
@@ -0,0 +1,346 @@
+package xmlquery
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func findNode(root *Node, name string) *Node {
+ node := root.FirstChild
+ for {
+ if node == nil || node.Data == name {
+ break
+ }
+ node = node.NextSibling
+ }
+ return node
+}
+
+func childNodes(root *Node, name string) []*Node {
+ var list []*Node
+ node := root.FirstChild
+ for {
+ if node == nil {
+ break
+ }
+ if node.Data == name {
+ list = append(list, node)
+ }
+ node = node.NextSibling
+ }
+ return list
+}
+
+func testNode(t *testing.T, n *Node, expected string) {
+ if n.Data != expected {
+ t.Fatalf("expected node name is %s,but got %s", expected, n.Data)
+ }
+}
+
+func testAttr(t *testing.T, n *Node, name, expected string) {
+ for _, attr := range n.Attr {
+ if attr.Name.Local == name && attr.Value == expected {
+ return
+ }
+ }
+ t.Fatalf("not found attribute %s in the node %s", name, n.Data)
+}
+
+func testValue(t *testing.T, val, expected string) {
+ if val != expected {
+ t.Fatalf("expected value is %s,but got %s", expected, val)
+ }
+}
+
+func TestLoadURL(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ s := `
+
+
+ `
+ w.Header().Set("Content-Type", "text/xml")
+ w.Write([]byte(s))
+ }))
+ defer server.Close()
+ _, err := LoadURL(server.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestNamespaceURL(t *testing.T) {
+ s := `
+
+
+
+
+21|22021348
+
+ `
+ doc, err := Parse(strings.NewReader(s))
+ if err != nil {
+ t.Fatal(err)
+ }
+ top := FindOne(doc, "//rss")
+ if top == nil {
+ t.Fatal("rss feed invalid")
+ }
+ node := FindOne(top, "dc:creator")
+ if node.Prefix != "dc" {
+ t.Fatalf("expected node prefix name is dc but is=%s", node.Prefix)
+ }
+ if node.NamespaceURI != "https://purl.org/dc/elements/1.1/" {
+ t.Fatalf("dc:creator != %s", node.NamespaceURI)
+ }
+ if strings.Index(top.InnerText(), "author") > 0 {
+ t.Fatalf("InnerText() include comment node text")
+ }
+ if strings.Index(top.OutputXML(true), "author") == -1 {
+ t.Fatal("OutputXML shoud include comment node,but not")
+ }
+}
+
+func TestMultipleProcInst(t *testing.T) {
+ s := `
+
+
+
+
+ `
+ doc, err := Parse(strings.NewReader(s))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ node := doc.FirstChild //
+ if node.Data != "xml" {
+ t.Fatal("node.Data != xml")
+ }
+ node = node.NextSibling // New Line
+ node = node.NextSibling //
+ if node.Data != "xml-stylesheet" {
+ t.Fatal("node.Data != xml-stylesheet")
+ }
+}
+
+func TestParse(t *testing.T) {
+ s := `
+
+
+ Harry Potter
+ 29.99
+
+
+ Learning XML
+ 39.95
+
+`
+ root, err := Parse(strings.NewReader(s))
+ if err != nil {
+ t.Error(err)
+ }
+ if root.Type != DocumentNode {
+ t.Fatal("top node of tree is not DocumentNode")
+ }
+
+ declarNode := root.FirstChild
+ if declarNode.Type != DeclarationNode {
+ t.Fatal("first child node of tree is not DeclarationNode")
+ }
+
+ if declarNode.Attr[0].Name.Local != "version" && declarNode.Attr[0].Value != "1.0" {
+ t.Fatal("version attribute not expected")
+ }
+
+ bookstore := root.LastChild
+ if bookstore.Data != "bookstore" {
+ t.Fatal("bookstore elem not found")
+ }
+ if bookstore.FirstChild.Data != "\n" {
+ t.Fatal("first child node of bookstore is not empty node(\n)")
+ }
+ books := childNodes(bookstore, "book")
+ if len(books) != 2 {
+ t.Fatalf("expected book element count is 2, but got %d", len(books))
+ }
+ // first book element
+ testNode(t, findNode(books[0], "title"), "title")
+ testAttr(t, findNode(books[0], "title"), "lang", "en")
+ testValue(t, findNode(books[0], "price").InnerText(), "29.99")
+ testValue(t, findNode(books[0], "title").InnerText(), "Harry Potter")
+
+ // second book element
+ testNode(t, findNode(books[1], "title"), "title")
+ testAttr(t, findNode(books[1], "title"), "lang", "en")
+ testValue(t, findNode(books[1], "price").InnerText(), "39.95")
+
+ testValue(t, books[0].OutputXML(true), `Harry Potter29.99`)
+}
+
+func TestMissDeclaration(t *testing.T) {
+ s := `
+
+
+ `
+ doc, err := Parse(strings.NewReader(s))
+ if err != nil {
+ t.Fatal(err)
+ }
+ node := FindOne(doc, "//AAA")
+ if node == nil {
+ t.Fatal("//AAA is nil")
+ }
+}
+
+func TestMissingNamespace(t *testing.T) {
+ s := `
+ value 1
+ value 2
+ `
+ _, err := Parse(strings.NewReader(s))
+ if err == nil {
+ t.Fatal("err is nil, want got invalid XML document")
+ }
+}
+
+func TestTooNested(t *testing.T) {
+ s := `
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ `
+ root, err := Parse(strings.NewReader(s))
+ if err != nil {
+ t.Error(err)
+ }
+ aaa := findNode(root, "AAA")
+ if aaa == nil {
+ t.Fatal("AAA node not exists")
+ }
+ ccc := aaa.LastChild
+ if ccc.Data != "CCC" {
+ t.Fatalf("expected node is CCC,but got %s", ccc.Data)
+ }
+ bbb := ccc.PrevSibling
+ if bbb.Data != "BBB" {
+ t.Fatalf("expected node is bbb,but got %s", bbb.Data)
+ }
+ ddd := findNode(bbb, "DDD")
+ testNode(t, ddd, "DDD")
+ testNode(t, ddd.LastChild, "CCC")
+}
+
+func TestSelectElement(t *testing.T) {
+ s := `
+
+
+
+
+
+
+
+
+ `
+ root, err := Parse(strings.NewReader(s))
+ if err != nil {
+ t.Error(err)
+ }
+ version := root.FirstChild.SelectAttr("version")
+ if version != "1.0" {
+ t.Fatal("version!=1.0")
+ }
+ aaa := findNode(root, "AAA")
+ var n *Node
+ n = aaa.SelectElement("BBB")
+ if n == nil {
+ t.Fatalf("n is nil")
+ }
+ n = aaa.SelectElement("CCC")
+ if n == nil {
+ t.Fatalf("n is nil")
+ }
+
+ var ns []*Node
+ ns = aaa.SelectElements("CCC")
+ if len(ns) != 2 {
+ t.Fatalf("len(ns)!=2")
+ }
+}
+
+func TestEscapeOutputValue(t *testing.T) {
+ data := `<*>`
+
+ root, err := Parse(strings.NewReader(data))
+ if err != nil {
+ t.Error(err)
+ }
+
+ escapedInnerText := root.OutputXML(true)
+ if !strings.Contains(escapedInnerText, "<*>") {
+ t.Fatal("Inner Text has not been escaped")
+ }
+
+}
+func TestOutputXMLWithNamespacePrefix(t *testing.T) {
+ s := ``
+ doc, _ := Parse(strings.NewReader(s))
+ if s != doc.OutputXML(false) {
+ t.Fatal("xml document missing some characters")
+ }
+}
+
+func TestAttributeWithNamespace(t *testing.T) {
+ s := `
+
+ `
+ doc, _ := Parse(strings.NewReader(s))
+ n := FindOne(doc, "//good[@n1:a='2']")
+ if n == nil {
+ t.Fatal("n is nil")
+ }
+}
+
+func TestOutputXMLWithCommentNode(t *testing.T) {
+ s := `
+
+
+
+ Robert
+ A+
+
+
+ `
+ doc, _ := Parse(strings.NewReader(s))
+ t.Log(doc.OutputXML(true))
+ if e, g := "", doc.OutputXML(true); strings.Index(g, e) == -1 {
+ t.Fatal("missing some comment-node.")
+ }
+ n := FindOne(doc, "//class_list")
+ t.Log(n.OutputXML(false))
+ if e, g := "Lenard", n.OutputXML(false); strings.Index(g, e) == -1 {
+ t.Fatal("missing some comment-node")
+ }
+}
diff --git a/vendor/github.com/antchfx/xmlquery/query.go b/vendor/github.com/antchfx/xmlquery/query.go
new file mode 100644
index 0000000..e3a0db7
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/query.go
@@ -0,0 +1,264 @@
+/*
+Package xmlquery provides functions to extract data from XML documents using XPath expressions.
+*/
+package xmlquery
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/antchfx/xpath"
+)
+
+// SelectElements finds child elements with the specified name.
+func (n *Node) SelectElements(name string) []*Node {
+ return Find(n, name)
+}
+
+// SelectElement finds the first child element with the specified name.
+func (n *Node) SelectElement(name string) *Node {
+ return FindOne(n, name)
+}
+
+// SelectAttr returns the attribute value with the specified name.
+func (n *Node) SelectAttr(name string) string {
+ if n.Type == AttributeNode {
+ if n.Data == name {
+ return n.InnerText()
+ }
+ return ""
+ }
+ var local, space string
+ local = name
+ if i := strings.Index(name, ":"); i > 0 {
+ space = name[:i]
+ local = name[i+1:]
+ }
+ for _, attr := range n.Attr {
+ if attr.Name.Local == local && attr.Name.Space == space {
+ return attr.Value
+ }
+ }
+ return ""
+}
+
+var _ xpath.NodeNavigator = &NodeNavigator{}
+
+// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified XML Node.
+func CreateXPathNavigator(top *Node) *NodeNavigator {
+ return &NodeNavigator{curr: top, root: top, attr: -1}
+}
+
+func getCurrentNode(it *xpath.NodeIterator) *Node {
+ n := it.Current().(*NodeNavigator)
+ if n.NodeType() == xpath.AttributeNode {
+ childNode := &Node{
+ Type: TextNode,
+ Data: n.Value(),
+ }
+ return &Node{
+ Type: AttributeNode,
+ Data: n.LocalName(),
+ FirstChild: childNode,
+ LastChild: childNode,
+ }
+ }
+ return n.curr
+}
+
+// Find searches the Node tree and returns all nodes matching the specified XPath expr.
+func Find(top *Node, expr string) []*Node {
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ t := exp.Select(CreateXPathNavigator(top))
+ var elems []*Node
+ for t.MoveNext() {
+ elems = append(elems, getCurrentNode(t))
+ }
+ return elems
+}
+
+// FindOne searches the Node tree for the first node matching the specified
+// XPath expr, and returns it (or nil when nothing matches).
+func FindOne(top *Node, expr string) *Node {
+ exp, err := xpath.Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ t := exp.Select(CreateXPathNavigator(top))
+ var elem *Node
+ if t.MoveNext() {
+ elem = getCurrentNode(t)
+ }
+ return elem
+}
+
+// FindEach searches the html.Node and calls functions cb.
+// Important: this method has deprecated, recommend use for .. = range Find(){}.
+func FindEach(top *Node, expr string, cb func(int, *Node)) {
+ for i, n := range Find(top, expr) {
+ cb(i, n)
+ }
+}
+
+// FindEachWithBreak functions the same as FindEach but allows you
+// to break the loop by returning false from your callback function, cb.
+// Important: this method is deprecated; use for _, n := range Find(...) instead.
+func FindEachWithBreak(top *Node, expr string, cb func(int, *Node) bool) {
+ for i, n := range Find(top, expr) {
+ if !cb(i, n) {
+ break
+ }
+ }
+}
+
+type NodeNavigator struct {
+ root, curr *Node
+ attr int
+}
+
+func (x *NodeNavigator) Current() *Node {
+ return x.curr
+}
+
+func (x *NodeNavigator) NodeType() xpath.NodeType {
+ switch x.curr.Type {
+ case CommentNode:
+ return xpath.CommentNode
+ case TextNode:
+ return xpath.TextNode
+ case DeclarationNode, DocumentNode:
+ return xpath.RootNode
+ case ElementNode:
+ if x.attr != -1 {
+ return xpath.AttributeNode
+ }
+ return xpath.ElementNode
+ }
+ panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type))
+}
+
+func (x *NodeNavigator) LocalName() string {
+ if x.attr != -1 {
+ return x.curr.Attr[x.attr].Name.Local
+ }
+ return x.curr.Data
+
+}
+
+func (x *NodeNavigator) Prefix() string {
+ if x.NodeType() == xpath.AttributeNode {
+ if x.attr != -1 {
+ return x.curr.Attr[x.attr].Name.Space
+ }
+ return ""
+ }
+ return x.curr.Prefix
+}
+
+func (x *NodeNavigator) Value() string {
+ switch x.curr.Type {
+ case CommentNode:
+ return x.curr.Data
+ case ElementNode:
+ if x.attr != -1 {
+ return x.curr.Attr[x.attr].Value
+ }
+ return x.curr.InnerText()
+ case TextNode:
+ return x.curr.Data
+ }
+ return ""
+}
+
+func (x *NodeNavigator) Copy() xpath.NodeNavigator {
+ n := *x
+ return &n
+}
+
+func (x *NodeNavigator) MoveToRoot() {
+ x.curr = x.root
+}
+
+func (x *NodeNavigator) MoveToParent() bool {
+ if x.attr != -1 {
+ x.attr = -1
+ return true
+ } else if node := x.curr.Parent; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveToNextAttribute() bool {
+ if x.attr >= len(x.curr.Attr)-1 {
+ return false
+ }
+ x.attr++
+ return true
+}
+
+func (x *NodeNavigator) MoveToChild() bool {
+ if x.attr != -1 {
+ return false
+ }
+ if node := x.curr.FirstChild; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveToFirst() bool {
+ if x.attr != -1 || x.curr.PrevSibling == nil {
+ return false
+ }
+ for {
+ node := x.curr.PrevSibling
+ if node == nil {
+ break
+ }
+ x.curr = node
+ }
+ return true
+}
+
+func (x *NodeNavigator) String() string {
+ return x.Value()
+}
+
+func (x *NodeNavigator) MoveToNext() bool {
+ if x.attr != -1 {
+ return false
+ }
+ if node := x.curr.NextSibling; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveToPrevious() bool {
+ if x.attr != -1 {
+ return false
+ }
+ if node := x.curr.PrevSibling; node != nil {
+ x.curr = node
+ return true
+ }
+ return false
+}
+
+func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
+ node, ok := other.(*NodeNavigator)
+ if !ok || node.root != x.root {
+ return false
+ }
+
+ x.curr = node.curr
+ x.attr = node.attr
+ return true
+}
diff --git a/vendor/github.com/antchfx/xmlquery/query_test.go b/vendor/github.com/antchfx/xmlquery/query_test.go
new file mode 100644
index 0000000..ffad006
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/query_test.go
@@ -0,0 +1,127 @@
+package xmlquery
+
+import (
+ "strings"
+ "testing"
+)
+
+// https://msdn.microsoft.com/en-us/library/ms762271(v=vs.85).aspx
+const xmlDoc = `
+
+
+
+
+ Gambardella, Matthew
+ XML Developer's Guide
+ Computer
+ 44.95
+ 2000-10-01
+ An in-depth look at creating applications
+ with XML.
+
+
+ Ralls, Kim
+ Midnight Rain
+ Fantasy
+ 5.95
+ 2000-12-16
+ A former architect battles corporate zombies,
+ an evil sorceress, and her own childhood to become queen
+ of the world.
+
+
+ Corets, Eva
+ Maeve Ascendant
+ Fantasy
+ 5.95
+ 2000-11-17
+ After the collapse of a nanotechnology
+ society in England, the young survivors lay the
+ foundation for a new society.
+
+`
+
+var doc = loadXML(xmlDoc)
+
+func TestXPath(t *testing.T) {
+ if list := Find(doc, "//book"); len(list) != 3 {
+ t.Fatal("count(//book) != 3")
+ }
+ if node := FindOne(doc, "//book[@id='bk101']"); node == nil {
+ t.Fatal("//book[@id='bk101] is not found")
+ }
+ if node := FindOne(doc, "//book[price>=44.95]"); node == nil {
+ t.Fatal("//book/price>=44.95 is not found")
+ }
+ if list := Find(doc, "//book[genre='Fantasy']"); len(list) != 2 {
+ t.Fatal("//book[genre='Fantasy'] items count is not equal 2")
+ }
+ var c int
+ FindEach(doc, "//book", func(i int, n *Node) {
+ c++
+ })
+ l := len(Find(doc, "//book"))
+ if c != l {
+ t.Fatal("count(//book) != 3")
+ }
+ c = 0
+ FindEachWithBreak(doc, "//book", func(i int, n *Node) bool {
+ if c == l - 1 {
+ return false
+ }
+ c++
+ return true
+ })
+ if c != l - 1 {
+ t.Fatal("FindEachWithBreak failed to stop.")
+ }
+ node := FindOne(doc, "//book[1]")
+ if node.SelectAttr("id") != "bk101" {
+ t.Fatal("//book[1]/@id != bk101")
+ }
+}
+
+func TestXPathCdUp(t *testing.T) {
+ doc := loadXML(``)
+ node := FindOne(doc, "/a/b/@attr/..")
+ t.Logf("node = %#v", node)
+ if node == nil || node.Data != "b" {
+ t.Fatal("//b/@id/.. != ")
+ }
+}
+
+func TestNavigator(t *testing.T) {
+ nav := &NodeNavigator{curr: doc, root: doc, attr: -1}
+ nav.MoveToChild() // New Line
+ nav.MoveToNext()
+ nav.MoveToNext() // catalog
+ if nav.curr.Data != "catalog" {
+ t.Fatal("current node name != `catalog`")
+ }
+ nav.MoveToChild() // New Line
+ nav.MoveToNext() // comment node
+ if nav.curr.Type != CommentNode {
+ t.Fatal("node type not CommentNode")
+ }
+ nav.Value()
+ nav.MoveToNext() // New Line
+ nav.MoveToNext() //book
+ nav.MoveToChild()
+ nav.MoveToNext() // book/author
+ if nav.LocalName() != "author" {
+ t.Fatalf("node error")
+ }
+ nav.MoveToParent() // book
+ nav.MoveToNext() // next book
+ if nav.curr.SelectAttr("id") != "bk102" {
+ t.Fatal("node error")
+ }
+}
+
+func loadXML(s string) *Node {
+ node, err := Parse(strings.NewReader(s))
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
diff --git a/vendor/github.com/antchfx/xpath/.gitignore b/vendor/github.com/antchfx/xpath/.gitignore
new file mode 100644
index 0000000..4d5d27b
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/.gitignore
@@ -0,0 +1,32 @@
+# vscode
+.vscode
+debug
+*.test
+
+./build
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/.travis.yml b/vendor/github.com/antchfx/xpath/.travis.yml
new file mode 100644
index 0000000..6b63957
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.6
+ - 1.9
+ - '1.10'
+
+install:
+ - go get github.com/mattn/goveralls
+
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/LICENSE b/vendor/github.com/antchfx/xpath/LICENSE
new file mode 100644
index 0000000..e14c371
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/README.md b/vendor/github.com/antchfx/xpath/README.md
new file mode 100644
index 0000000..414114d
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/README.md
@@ -0,0 +1,167 @@
+XPath
+====
+[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
+[![Build Status](https://travis-ci.org/antchfx/xpath.svg?branch=master)](https://travis-ci.org/antchfx/xpath)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
+
+XPath is a Go package that provides node selection in XML, HTML and other documents using XPath expressions.
+
+Implementation
+===
+
+- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML document
+
+- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML document.
+
+- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON document
+
+Supported Features
+===
+
+#### The basic XPath patterns.
+
+> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
+
+- `node` : Selects all child elements with nodeName of node.
+
+- `*` : Selects all child elements.
+
+- `@attr` : Selects the attribute attr.
+
+- `@*` : Selects all attributes.
+
+- `node()` : Matches an org.w3c.dom.Node.
+
+- `text()` : Matches a org.w3c.dom.Text node.
+
+- `comment()` : Matches a comment.
+
+- `.` : Selects the current node.
+
+- `..` : Selects the parent of current node.
+
+- `/` : Selects the document node.
+
+- `a[expr]` : Select only those nodes matching a which also satisfy the expression expr.
+
+- `a[n]` : Selects the nth matching node matching a. When a filter's expression is a number, XPath selects based on position.
+
+- `a/b` : For each node matching a, add the nodes matching b to the result.
+
+- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
+
+- `//b` : Returns elements in the entire document matching b.
+
+- `a|b` : All nodes matching a or b, union operation(not boolean or).
+
+- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence
+
+
+#### Node Axes
+
+- `child::*` : The child axis selects children of the current node.
+
+- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to '//'.
+
+- `descendant-or-self::*` : Selects descendants including the current node.
+
+- `attribute::*` : Selects attributes of the current element. It is equivalent to @*
+
+- `following-sibling::*` : Selects nodes after the current node.
+
+- `preceding-sibling::*` : Selects nodes before the current node.
+
+- `following::*` : Selects the first matching node following in document order, excluding descendants.
+
+- `preceding::*` : Selects the first matching node preceding in document order, excluding ancestors.
+
+- `parent::*` : Selects the parent if it matches. The '..' pattern from the core is equivalent to 'parent::node()'.
+
+- `ancestor::*` : Selects matching ancestors.
+
+- `ancestor-or-self::*` : Selects ancestors including the current node.
+
+- `self::*` : Selects the current node. '.' is equivalent to 'self::node()'.
+
+#### Expressions
+
+ The package supports three types: number, boolean, string.
+
+- `path` : Selects nodes based on the path.
+
+- `a = b` : Standard comparisons.
+
+ * a = b True if a equals b.
+ * a != b True if a is not equal to b.
+ * a < b True if a is less than b.
+ * a <= b True if a is less than or equal to b.
+ * a > b True if a is greater than b.
+ * a >= b True if a is greater than or equal to b.
+
+- `a + b` : Arithmetic expressions.
+
+ * `- a` Unary minus
+ * a + b Add
+ * a - b Subtract
+ * a * b Multiply
+ * a div b Divide
+ * a mod b Floating point mod, like Java.
+
+- `a or b` : Boolean `or` operation.
+
+- `a and b` : Boolean `and` operation.
+
+- `(expr)` : Parenthesized expressions.
+
+- `fun(arg1, ..., argn)` : Function calls:
+
+| Function | Supported |
+| --- | --- |
+`boolean()`| ✓ |
+`ceiling()`| ✓ |
+`choose()`| ✗ |
+`concat()`| ✓ |
+`contains()`| ✓ |
+`count()`| ✓ |
+`current()`| ✗ |
+`document()`| ✗ |
+`element-available()`| ✗ |
+`ends-with()`| ✓ |
+`false()`| ✓ |
+`floor()`| ✓ |
+`format-number()`| ✗ |
+`function-available()`| ✗ |
+`generate-id()`| ✗ |
+`id()`| ✗ |
+`key()`| ✗ |
+`lang()`| ✗ |
+`last()`| ✓ |
+`local-name()`| ✓ |
+`name()`| ✓ |
+`namespace-uri()`| ✓ |
+`normalize-space()`| ✓ |
+`not()`| ✓ |
+`number()`| ✓ |
+`position()`| ✓ |
+`round()`| ✓ |
+`starts-with()`| ✓ |
+`string()`| ✓ |
+`string-length()`| ✓ |
+`substring()`| ✓ |
+`substring-after()`| ✓ |
+`substring-before()`| ✓ |
+`sum()`| ✓ |
+`system-property()`| ✗ |
+`translate()`| ✓ |
+`true()`| ✓ |
+`unparsed-entity-url()` | ✗ |
+
+Changelogs
+===
+
+2019-01-29
+- improvement `normalize-space` function. [#32](https://github.com/antchfx/xpath/issues/32)
+
+2018-12-07
+- supports XPath 2.0 Sequence expressions. [#30](https://github.com/antchfx/xpath/pull/30) by [@minherz](https://github.com/minherz).
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/build.go b/vendor/github.com/antchfx/xpath/build.go
new file mode 100644
index 0000000..74f266b
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/build.go
@@ -0,0 +1,483 @@
+package xpath
+
+import (
+ "errors"
+ "fmt"
+)
+
+type flag int
+
+const (
+ noneFlag flag = iota
+ filterFlag
+)
+
+// builder provides building of XPath expressions into query objects.
+type builder struct {
+ depth int
+ flag flag
+ firstInput query
+}
+
+// axisPredicate creates a predicate function that matches nodes for this axis node.
+func axisPredicate(root *axisNode) func(NodeNavigator) bool {
+ // get current axis node type.
+ typ := ElementNode
+ switch root.AxeType {
+ case "attribute":
+ typ = AttributeNode
+ case "self", "parent":
+ typ = allNode
+ default:
+ switch root.Prop {
+ case "comment":
+ typ = CommentNode
+ case "text":
+ typ = TextNode
+ // case "processing-instruction":
+ // typ = ProcessingInstructionNode
+ case "node":
+ typ = allNode
+ }
+ }
+ nametest := root.LocalName != "" || root.Prefix != ""
+ predicate := func(n NodeNavigator) bool {
+ if typ == n.NodeType() || typ == allNode || typ == TextNode {
+ if nametest {
+ if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() {
+ return true
+ }
+ } else {
+ return true
+ }
+ }
+ return false
+ }
+
+ return predicate
+}
+
+// processAxisNode processes a query for the XPath axis node.
+func (b *builder) processAxisNode(root *axisNode) (query, error) {
+ var (
+ err error
+ qyInput query
+ qyOutput query
+ predicate = axisPredicate(root)
+ )
+
+ if root.Input == nil {
+ qyInput = &contextQuery{}
+ } else {
+ if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
+ if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
+ var qyGrandInput query
+ if input.Input != nil {
+ qyGrandInput, _ = b.processNode(input.Input)
+ } else {
+ qyGrandInput = &contextQuery{}
+ }
+ qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
+ return qyOutput, nil
+ }
+ }
+ qyInput, err = b.processNode(root.Input)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ switch root.AxeType {
+ case "ancestor":
+ qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate}
+ case "ancestor-or-self":
+ qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate, Self: true}
+ case "attribute":
+ qyOutput = &attributeQuery{Input: qyInput, Predicate: predicate}
+ case "child":
+ filter := func(n NodeNavigator) bool {
+ v := predicate(n)
+ switch root.Prop {
+ case "text":
+ v = v && n.NodeType() == TextNode
+ case "node":
+ v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
+ case "comment":
+ v = v && n.NodeType() == CommentNode
+ }
+ return v
+ }
+ qyOutput = &childQuery{Input: qyInput, Predicate: filter}
+ case "descendant":
+ qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate}
+ case "descendant-or-self":
+ qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate, Self: true}
+ case "following":
+ qyOutput = &followingQuery{Input: qyInput, Predicate: predicate}
+ case "following-sibling":
+ qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
+ case "parent":
+ qyOutput = &parentQuery{Input: qyInput, Predicate: predicate}
+ case "preceding":
+ qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate}
+ case "preceding-sibling":
+ qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
+ case "self":
+ qyOutput = &selfQuery{Input: qyInput, Predicate: predicate}
+ case "namespace":
+ // The namespace axis is not supported; no query is produced for it.
+ default:
+ err = fmt.Errorf("unknown axe type: %s", root.AxeType)
+ return nil, err
+ }
+ return qyOutput, nil
+}
+
+// processFilterNode builds query for the XPath filter predicate.
+func (b *builder) processFilterNode(root *filterNode) (query, error) {
+ b.flag |= filterFlag
+
+ qyInput, err := b.processNode(root.Input)
+ if err != nil {
+ return nil, err
+ }
+ qyCond, err := b.processNode(root.Condition)
+ if err != nil {
+ return nil, err
+ }
+ qyOutput := &filterQuery{Input: qyInput, Predicate: qyCond}
+ return qyOutput, nil
+}
+
+// processFunctionNode processes query for the XPath function node.
+func (b *builder) processFunctionNode(root *functionNode) (query, error) {
+ var qyOutput query
+ switch root.FuncName {
+ case "starts-with":
+ arg1, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ arg2, err := b.processNode(root.Args[1])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)}
+ case "ends-with":
+ arg1, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ arg2, err := b.processNode(root.Args[1])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: b.firstInput, Func: endwithFunc(arg1, arg2)}
+ case "contains":
+ arg1, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ arg2, err := b.processNode(root.Args[1])
+ if err != nil {
+ return nil, err
+ }
+
+ qyOutput = &functionQuery{Input: b.firstInput, Func: containsFunc(arg1, arg2)}
+ case "substring":
+ //substring( string , start [, length] )
+ if len(root.Args) < 2 {
+ return nil, errors.New("xpath: substring function must have at least two parameter")
+ }
+ var (
+ arg1, arg2, arg3 query
+ err error
+ )
+ if arg1, err = b.processNode(root.Args[0]); err != nil {
+ return nil, err
+ }
+ if arg2, err = b.processNode(root.Args[1]); err != nil {
+ return nil, err
+ }
+ if len(root.Args) == 3 {
+ if arg3, err = b.processNode(root.Args[2]); err != nil {
+ return nil, err
+ }
+ }
+ qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)}
+ case "substring-before", "substring-after":
+ //substring-xxxx( haystack, needle )
+ if len(root.Args) != 2 {
+ return nil, errors.New("xpath: substring-before function must have two parameters")
+ }
+ var (
+ arg1, arg2 query
+ err error
+ )
+ if arg1, err = b.processNode(root.Args[0]); err != nil {
+ return nil, err
+ }
+ if arg2, err = b.processNode(root.Args[1]); err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{
+ Input: b.firstInput,
+ Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"),
+ }
+ case "string-length":
+ // string-length( [string] )
+ if len(root.Args) < 1 {
+ return nil, errors.New("xpath: string-length function must have at least one parameter")
+ }
+ arg1, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: b.firstInput, Func: stringLengthFunc(arg1)}
+ case "normalize-space":
+ if len(root.Args) == 0 {
+ return nil, errors.New("xpath: normalize-space function must have at least one parameter")
+ }
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
+ case "translate":
+ //translate( string , string, string )
+ if len(root.Args) != 3 {
+ return nil, errors.New("xpath: translate function must have three parameters")
+ }
+ var (
+ arg1, arg2, arg3 query
+ err error
+ )
+ if arg1, err = b.processNode(root.Args[0]); err != nil {
+ return nil, err
+ }
+ if arg2, err = b.processNode(root.Args[1]); err != nil {
+ return nil, err
+ }
+ if arg3, err = b.processNode(root.Args[2]); err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: b.firstInput, Func: translateFunc(arg1, arg2, arg3)}
+ case "not":
+ if len(root.Args) == 0 {
+ return nil, errors.New("xpath: not function must have at least one parameter")
+ }
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
+ case "name", "local-name", "namespace-uri":
+ inp := b.firstInput
+ if len(root.Args) > 1 {
+ return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
+ }
+ if len(root.Args) == 1 {
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ inp = argQuery
+ }
+ f := &functionQuery{Input: inp}
+ switch root.FuncName {
+ case "name":
+ f.Func = nameFunc
+ case "local-name":
+ f.Func = localNameFunc
+ case "namespace-uri":
+ f.Func = namespaceFunc
+ }
+ qyOutput = f
+ case "true", "false":
+ val := root.FuncName == "true"
+ qyOutput = &functionQuery{
+ Input: b.firstInput,
+ Func: func(_ query, _ iterator) interface{} {
+ return val
+ },
+ }
+ case "last":
+ qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc}
+ case "position":
+ qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc}
+ case "boolean", "number", "string":
+ inp := b.firstInput
+ if len(root.Args) > 1 {
+ return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
+ }
+ if len(root.Args) == 1 {
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ inp = argQuery
+ }
+ f := &functionQuery{Input: inp}
+ switch root.FuncName {
+ case "boolean":
+ f.Func = booleanFunc
+ case "string":
+ f.Func = stringFunc
+ case "number":
+ f.Func = numberFunc
+ }
+ qyOutput = f
+ case "count":
+ //if b.firstInput == nil {
+ // return nil, errors.New("xpath: expression must evaluate to node-set")
+ //}
+ if len(root.Args) == 0 {
+ return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets")
+ }
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: argQuery, Func: countFunc}
+ case "sum":
+ if len(root.Args) == 0 {
+ return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets")
+ }
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
+ case "ceiling", "floor", "round":
+ if len(root.Args) == 0 {
+ return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
+ }
+ argQuery, err := b.processNode(root.Args[0])
+ if err != nil {
+ return nil, err
+ }
+ f := &functionQuery{Input: argQuery}
+ switch root.FuncName {
+ case "ceiling":
+ f.Func = ceilingFunc
+ case "floor":
+ f.Func = floorFunc
+ case "round":
+ f.Func = roundFunc
+ }
+ qyOutput = f
+ case "concat":
+ if len(root.Args) < 2 {
+ return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
+ }
+ var args []query
+ for _, v := range root.Args {
+ q, err := b.processNode(v)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, q)
+ }
+ qyOutput = &functionQuery{Input: b.firstInput, Func: concatFunc(args...)}
+ default:
+ return nil, fmt.Errorf("not yet support this function %s()", root.FuncName)
+ }
+ return qyOutput, nil
+}
+
+func (b *builder) processOperatorNode(root *operatorNode) (query, error) {
+ left, err := b.processNode(root.Left)
+ if err != nil {
+ return nil, err
+ }
+ right, err := b.processNode(root.Right)
+ if err != nil {
+ return nil, err
+ }
+ var qyOutput query
+ switch root.Op {
+ case "+", "-", "div", "mod": // Numeric operator
+ var exprFunc func(interface{}, interface{}) interface{}
+ switch root.Op {
+ case "+":
+ exprFunc = plusFunc
+ case "-":
+ exprFunc = minusFunc
+ case "div":
+ exprFunc = divFunc
+ case "mod":
+ exprFunc = modFunc
+ }
+ qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc}
+ case "=", ">", ">=", "<", "<=", "!=":
+ var exprFunc func(iterator, interface{}, interface{}) interface{}
+ switch root.Op {
+ case "=":
+ exprFunc = eqFunc
+ case ">":
+ exprFunc = gtFunc
+ case ">=":
+ exprFunc = geFunc
+ case "<":
+ exprFunc = ltFunc
+ case "<=":
+ exprFunc = leFunc
+ case "!=":
+ exprFunc = neFunc
+ }
+ qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc}
+ case "or", "and":
+ isOr := false
+ if root.Op == "or" {
+ isOr = true
+ }
+ qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr}
+ case "|":
+ qyOutput = &unionQuery{Left: left, Right: right}
+ }
+ return qyOutput, nil
+}
+
+func (b *builder) processNode(root node) (q query, err error) {
+ if b.depth = b.depth + 1; b.depth > 1024 {
+ err = errors.New("the xpath expressions is too complex")
+ return
+ }
+
+ switch root.Type() {
+ case nodeConstantOperand:
+ n := root.(*operandNode)
+ q = &constantQuery{Val: n.Val}
+ case nodeRoot:
+ q = &contextQuery{Root: true}
+ case nodeAxis:
+ q, err = b.processAxisNode(root.(*axisNode))
+ b.firstInput = q
+ case nodeFilter:
+ q, err = b.processFilterNode(root.(*filterNode))
+ case nodeFunction:
+ q, err = b.processFunctionNode(root.(*functionNode))
+ case nodeOperator:
+ q, err = b.processOperatorNode(root.(*operatorNode))
+ }
+ return
+}
+
+// build builds the specified XPath expression expr into a query.
+func build(expr string) (q query, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ switch x := e.(type) {
+ case string:
+ err = errors.New(x)
+ case error:
+ err = x
+ default:
+ err = errors.New("unknown panic")
+ }
+ }
+ }()
+ root := parse(expr)
+ b := &builder{}
+ return b.processNode(root)
+}
diff --git a/vendor/github.com/antchfx/xpath/doc_test.go b/vendor/github.com/antchfx/xpath/doc_test.go
new file mode 100644
index 0000000..2ef8e83
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/doc_test.go
@@ -0,0 +1,33 @@
+package xpath_test
+
+import (
+ "fmt"
+
+ "github.com/antchfx/xpath"
+)
+
+// XPath package example.
+func Example() {
+ expr, err := xpath.Compile("count(//book)")
+ if err != nil {
+ panic(err)
+ }
+ var root xpath.NodeNavigator
+ // using Evaluate() method
+ val := expr.Evaluate(root) // it returns float64 type
+ fmt.Println(val.(float64))
+
+ // using MustCompile() and Evaluate() methods
+ expr = xpath.MustCompile("//book")
+ val = expr.Evaluate(root) // it returns NodeIterator type.
+ iter := val.(*xpath.NodeIterator)
+ for iter.MoveNext() {
+ fmt.Println(iter.Current().Value())
+ }
+
+ // using Select() method
+ iter = expr.Select(root) // it always returns NodeIterator object.
+ for iter.MoveNext() {
+ fmt.Println(iter.Current().Value())
+ }
+}
diff --git a/vendor/github.com/antchfx/xpath/func.go b/vendor/github.com/antchfx/xpath/func.go
new file mode 100644
index 0000000..3c0fde9
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/func.go
@@ -0,0 +1,484 @@
+package xpath
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The XPath function list.
+
+func predicate(q query) func(NodeNavigator) bool {
+ type Predicater interface {
+ Test(NodeNavigator) bool
+ }
+ if p, ok := q.(Predicater); ok {
+ return p.Test
+ }
+ return func(NodeNavigator) bool { return true }
+}
+
+// positionFunc is a XPath Node Set functions position().
+func positionFunc(q query, t iterator) interface{} {
+ var (
+ count = 1
+ node = t.Current()
+ )
+ test := predicate(q)
+ for node.MoveToPrevious() {
+ if test(node) {
+ count++
+ }
+ }
+ return float64(count)
+}
+
+// lastFunc is a XPath Node Set functions last().
+func lastFunc(q query, t iterator) interface{} {
+ var (
+ count = 0
+ node = t.Current()
+ )
+ node.MoveToFirst()
+ test := predicate(q)
+ for {
+ if test(node) {
+ count++
+ }
+ if !node.MoveToNext() {
+ break
+ }
+ }
+ return float64(count)
+}
+
+// countFunc is a XPath Node Set functions count(node-set).
+func countFunc(q query, t iterator) interface{} {
+ var count = 0
+ test := predicate(q)
+ switch typ := q.Evaluate(t).(type) {
+ case query:
+ for node := typ.Select(t); node != nil; node = typ.Select(t) {
+ if test(node) {
+ count++
+ }
+ }
+ }
+ return float64(count)
+}
+
+// sumFunc is a XPath Node Set functions sum(node-set).
+func sumFunc(q query, t iterator) interface{} {
+ var sum float64
+ switch typ := q.Evaluate(t).(type) {
+ case query:
+ for node := typ.Select(t); node != nil; node = typ.Select(t) {
+ if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
+ sum += v
+ }
+ }
+ case float64:
+ sum = typ
+ case string:
+ v, err := strconv.ParseFloat(typ, 64)
+ if err != nil {
+ panic(errors.New("sum() function argument type must be a node-set or number"))
+ }
+ sum = v
+ }
+ return sum
+}
+
+func asNumber(t iterator, o interface{}) float64 {
+ switch typ := o.(type) {
+ case query:
+ node := typ.Select(t)
+ if node == nil {
+ return float64(0)
+ }
+ if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
+ return v
+ }
+ case float64:
+ return typ
+ case string:
+ v, err := strconv.ParseFloat(typ, 64)
+ if err != nil {
+ panic(errors.New("ceiling() function argument type must be a node-set or number"))
+ }
+ return v
+ }
+ return 0
+}
+
+// ceilingFunc is a XPath Node Set functions ceiling(node-set).
+func ceilingFunc(q query, t iterator) interface{} {
+ val := asNumber(t, q.Evaluate(t))
+ return math.Ceil(val)
+}
+
+// floorFunc is a XPath Node Set functions floor(node-set).
+func floorFunc(q query, t iterator) interface{} {
+ val := asNumber(t, q.Evaluate(t))
+ return math.Floor(val)
+}
+
+// roundFunc is a XPath Node Set functions round(node-set).
+func roundFunc(q query, t iterator) interface{} {
+ val := asNumber(t, q.Evaluate(t))
+ //return math.Round(val)
+ return round(val)
+}
+
+// nameFunc is a XPath functions name([node-set]).
+func nameFunc(q query, t iterator) interface{} {
+ v := q.Select(t)
+ if v == nil {
+ return ""
+ }
+ ns := v.Prefix()
+ if ns == "" {
+ return v.LocalName()
+ }
+ return ns + ":" + v.LocalName()
+}
+
+// localNameFunc is a XPath functions local-name([node-set]).
+func localNameFunc(q query, t iterator) interface{} {
+ v := q.Select(t)
+ if v == nil {
+ return ""
+ }
+ return v.LocalName()
+}
+
+// namespaceFunc is a XPath functions namespace-uri([node-set]).
+func namespaceFunc(q query, t iterator) interface{} {
+ v := q.Select(t)
+ if v == nil {
+ return ""
+ }
+ return v.Prefix()
+}
+
+func asBool(t iterator, v interface{}) bool {
+ switch v := v.(type) {
+ case nil:
+ return false
+ case *NodeIterator:
+ return v.MoveNext()
+ case bool:
+ return bool(v)
+ case float64:
+ return v != 0
+ case string:
+ return v != ""
+ case query:
+ return v.Select(t) != nil
+ default:
+ panic(fmt.Errorf("unexpected type: %T", v))
+ }
+}
+
+func asString(t iterator, v interface{}) string {
+ switch v := v.(type) {
+ case nil:
+ return ""
+ case bool:
+ if v {
+ return "true"
+ }
+ return "false"
+ case float64:
+ return strconv.FormatFloat(v, 'g', -1, 64)
+ case string:
+ return v
+ case query:
+ node := v.Select(t)
+ if node == nil {
+ return ""
+ }
+ return node.Value()
+ default:
+ panic(fmt.Errorf("unexpected type: %T", v))
+ }
+}
+
+// booleanFunc is a XPath functions boolean([node-set]).
+func booleanFunc(q query, t iterator) interface{} {
+ v := q.Evaluate(t)
+ return asBool(t, v)
+}
+
+// numberFunc is a XPath functions number([node-set]).
+func numberFunc(q query, t iterator) interface{} {
+ v := q.Evaluate(t)
+ return asNumber(t, v)
+}
+
+// stringFunc is a XPath functions string([node-set]).
+func stringFunc(q query, t iterator) interface{} {
+ v := q.Evaluate(t)
+ return asString(t, v)
+}
+
+// startwithFunc is a XPath functions starts-with(string, string).
+func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ var (
+ m, n string
+ ok bool
+ )
+ switch typ := arg1.Evaluate(t).(type) {
+ case string:
+ m = typ
+ case query:
+ node := typ.Select(t)
+ if node == nil {
+ return false
+ }
+ m = node.Value()
+ default:
+ panic(errors.New("starts-with() function argument type must be string"))
+ }
+ n, ok = arg2.Evaluate(t).(string)
+ if !ok {
+ panic(errors.New("starts-with() function argument type must be string"))
+ }
+ return strings.HasPrefix(m, n)
+ }
+}
+
+// endwithFunc is a XPath functions ends-with(string, string).
+func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ var (
+ m, n string
+ ok bool
+ )
+ switch typ := arg1.Evaluate(t).(type) {
+ case string:
+ m = typ
+ case query:
+ node := typ.Select(t)
+ if node == nil {
+ return false
+ }
+ m = node.Value()
+ default:
+ panic(errors.New("ends-with() function argument type must be string"))
+ }
+ n, ok = arg2.Evaluate(t).(string)
+ if !ok {
+ panic(errors.New("ends-with() function argument type must be string"))
+ }
+ return strings.HasSuffix(m, n)
+ }
+}
+
+// containsFunc is a XPath functions contains(string or @attr, string).
+func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ var (
+ m, n string
+ ok bool
+ )
+
+ switch typ := arg1.Evaluate(t).(type) {
+ case string:
+ m = typ
+ case query:
+ node := typ.Select(t)
+ if node == nil {
+ return false
+ }
+ m = node.Value()
+ default:
+ panic(errors.New("contains() function argument type must be string"))
+ }
+
+ n, ok = arg2.Evaluate(t).(string)
+ if !ok {
+ panic(errors.New("contains() function argument type must be string"))
+ }
+
+ return strings.Contains(m, n)
+ }
+}
+
+var (
+ regnewline = regexp.MustCompile(`[\r\n\t]`)
+ regseqspace = regexp.MustCompile(`\s{2,}`)
+)
+
+// normalizespaceFunc is XPath functions normalize-space(string?)
+func normalizespaceFunc(q query, t iterator) interface{} {
+ var m string
+ switch typ := q.Evaluate(t).(type) {
+ case string:
+ m = typ
+ case query:
+ node := typ.Select(t)
+ if node == nil {
+ return ""
+ }
+ m = node.Value()
+ }
+ m = strings.TrimSpace(m)
+ m = regnewline.ReplaceAllString(m, " ")
+ m = regseqspace.ReplaceAllString(m, " ")
+ return m
+}
+
+// substringFunc is the XPath substring() function, which returns a part of a given string.
+func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ var m string
+ switch typ := arg1.Evaluate(t).(type) {
+ case string:
+ m = typ
+ case query:
+ node := typ.Select(t)
+ if node == nil {
+ return ""
+ }
+ m = node.Value()
+ }
+
+ var start, length float64
+ var ok bool
+
+ if start, ok = arg2.Evaluate(t).(float64); !ok {
+ panic(errors.New("substring() function first argument type must be int"))
+ } else if start < 1 {
+ panic(errors.New("substring() function first argument type must be >= 1"))
+ }
+ start--
+ if arg3 != nil {
+ if length, ok = arg3.Evaluate(t).(float64); !ok {
+ panic(errors.New("substring() function second argument type must be int"))
+ }
+ }
+ if (len(m) - int(start)) < int(length) {
+ panic(errors.New("substring() function start and length argument out of range"))
+ }
+ if length > 0 {
+ return m[int(start):int(length+start)]
+ }
+ return m[int(start):]
+ }
+}
+
+// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string.
+func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ var str string
+ switch v := arg1.Evaluate(t).(type) {
+ case string:
+ str = v
+ case query:
+ node := v.Select(t)
+ if node == nil {
+ return ""
+ }
+ str = node.Value()
+ }
+ var word string
+ switch v := arg2.Evaluate(t).(type) {
+ case string:
+ word = v
+ case query:
+ node := v.Select(t)
+ if node == nil {
+ return ""
+ }
+ word = node.Value()
+ }
+ if word == "" {
+ return ""
+ }
+
+ i := strings.Index(str, word)
+ if i < 0 {
+ return ""
+ }
+ if after {
+ return str[i+len(word):]
+ }
+ return str[:i]
+ }
+}
+
+// stringLengthFunc is XPATH string-length( [string] ) function that returns a number
+// equal to the number of characters in a given string.
+func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ switch v := arg1.Evaluate(t).(type) {
+ case string:
+ return float64(len(v))
+ case query:
+ node := v.Select(t)
+ if node == nil {
+ break
+ }
+ return float64(len(node.Value()))
+ }
+ return float64(0)
+ }
+}
+
+// translateFunc is XPath functions translate() function returns a replaced string.
+func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ str := asString(t, arg1.Evaluate(t))
+ src := asString(t, arg2.Evaluate(t))
+ dst := asString(t, arg3.Evaluate(t))
+
+ var replace []string
+ for i, s := range src {
+ d := ""
+ if i < len(dst) {
+ d = string(dst[i])
+ }
+ replace = append(replace, string(s), d)
+ }
+ return strings.NewReplacer(replace...).Replace(str)
+ }
+}
+
+// notFunc is XPATH functions not(expression) function operation.
+func notFunc(q query, t iterator) interface{} {
+ switch v := q.Evaluate(t).(type) {
+ case bool:
+ return !v
+ case query:
+ node := v.Select(t)
+ return node == nil
+ default:
+ return false
+ }
+}
+
+// concatFunc is the concat function concatenates two or more
+// strings and returns the resulting string.
+// concat( string1 , string2 [, stringn]* )
+func concatFunc(args ...query) func(query, iterator) interface{} {
+ return func(q query, t iterator) interface{} {
+ var a []string
+ for _, v := range args {
+ switch v := v.Evaluate(t).(type) {
+ case string:
+ a = append(a, v)
+ case query:
+ node := v.Select(t)
+ if node != nil {
+ a = append(a, node.Value())
+ }
+ }
+ }
+ return strings.Join(a, "")
+ }
+}
diff --git a/vendor/github.com/antchfx/xpath/func_go110.go b/vendor/github.com/antchfx/xpath/func_go110.go
new file mode 100644
index 0000000..500880f
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/func_go110.go
@@ -0,0 +1,9 @@
+// +build go1.10
+
+package xpath
+
+import "math"
+
+func round(f float64) int {
+ return int(math.Round(f))
+}
diff --git a/vendor/github.com/antchfx/xpath/func_pre_go110.go b/vendor/github.com/antchfx/xpath/func_pre_go110.go
new file mode 100644
index 0000000..043616b
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/func_pre_go110.go
@@ -0,0 +1,15 @@
+// +build !go1.10
+
+package xpath
+
+import "math"
+
+// math.Round() is supported by Go 1.10+;
+// this helper provides compatible behavior for versions <1.10.
+// https://github.com/golang/go/issues/20100
+func round(f float64) int {
+ if math.Abs(f) < 0.5 {
+ return 0
+ }
+ return int(f + math.Copysign(0.5, f))
+}
diff --git a/vendor/github.com/antchfx/xpath/operator.go b/vendor/github.com/antchfx/xpath/operator.go
new file mode 100644
index 0000000..308d3cb
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/operator.go
@@ -0,0 +1,295 @@
+package xpath
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// The XPath number operator function list.
+
+// valueType is a return value type.
+type valueType int
+
+const (
+ booleanType valueType = iota
+ numberType
+ stringType
+ nodeSetType
+)
+
+func getValueType(i interface{}) valueType {
+ v := reflect.ValueOf(i)
+ switch v.Kind() {
+ case reflect.Float64:
+ return numberType
+ case reflect.String:
+ return stringType
+ case reflect.Bool:
+ return booleanType
+ default:
+ if _, ok := i.(query); ok {
+ return nodeSetType
+ }
+ }
+ panic(fmt.Errorf("xpath unknown value type: %v", v.Kind()))
+}
+
+type logical func(iterator, string, interface{}, interface{}) bool
+
+var logicalFuncs = [][]logical{
+ {cmpBooleanBoolean, nil, nil, nil},
+ {nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet},
+ {nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet},
+ {nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet},
+}
+
+// number vs number
+func cmpNumberNumberF(op string, a, b float64) bool {
+ switch op {
+ case "=":
+ return a == b
+ case ">":
+ return a > b
+ case "<":
+ return a < b
+ case ">=":
+ return a >= b
+ case "<=":
+ return a <= b
+ case "!=":
+ return a != b
+ }
+ return false
+}
+
+// string vs string
+func cmpStringStringF(op string, a, b string) bool {
+ switch op {
+ case "=":
+ return a == b
+ case ">":
+ return a > b
+ case "<":
+ return a < b
+ case ">=":
+ return a >= b
+ case "<=":
+ return a <= b
+ case "!=":
+ return a != b
+ }
+ return false
+}
+
+func cmpBooleanBooleanF(op string, a, b bool) bool {
+ switch op {
+ case "or":
+ return a || b
+ case "and":
+ return a && b
+ }
+ return false
+}
+
+func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool {
+ a := m.(float64)
+ b := n.(float64)
+ return cmpNumberNumberF(op, a, b)
+}
+
+func cmpNumericString(t iterator, op string, m, n interface{}) bool {
+ a := m.(float64)
+ b := n.(string)
+ num, err := strconv.ParseFloat(b, 64)
+ if err != nil {
+ panic(err)
+ }
+ return cmpNumberNumberF(op, a, num)
+}
+
+func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool {
+ a := m.(float64)
+ b := n.(query)
+
+ for {
+ node := b.Select(t)
+ if node == nil {
+ break
+ }
+ num, err := strconv.ParseFloat(node.Value(), 64)
+ if err != nil {
+ panic(err)
+ }
+ if cmpNumberNumberF(op, a, num) {
+ return true
+ }
+ }
+ return false
+}
+
+func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool {
+ a := m.(query)
+ b := n.(float64)
+ for {
+ node := a.Select(t)
+ if node == nil {
+ break
+ }
+ num, err := strconv.ParseFloat(node.Value(), 64)
+ if err != nil {
+ panic(err)
+ }
+ if cmpNumberNumberF(op, num, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func cmpNodeSetString(t iterator, op string, m, n interface{}) bool {
+ a := m.(query)
+ b := n.(string)
+ for {
+ node := a.Select(t)
+ if node == nil {
+ break
+ }
+ if cmpStringStringF(op, b, node.Value()) {
+ return true
+ }
+ }
+ return false
+}
+
+func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool {
+ return false
+}
+
+func cmpStringNumeric(t iterator, op string, m, n interface{}) bool {
+ a := m.(string)
+ b := n.(float64)
+ num, err := strconv.ParseFloat(a, 64)
+ if err != nil {
+ panic(err)
+ }
+ return cmpNumberNumberF(op, b, num)
+}
+
+func cmpStringString(t iterator, op string, m, n interface{}) bool {
+ a := m.(string)
+ b := n.(string)
+ return cmpStringStringF(op, a, b)
+}
+
+func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool {
+ a := m.(string)
+ b := n.(query)
+ for {
+ node := b.Select(t)
+ if node == nil {
+ break
+ }
+ if cmpStringStringF(op, a, node.Value()) {
+ return true
+ }
+ }
+ return false
+}
+
+func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool {
+ a := m.(bool)
+ b := n.(bool)
+ return cmpBooleanBooleanF(op, a, b)
+}
+
+// eqFunc is an `=` operator.
+func eqFunc(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, "=", m, n)
+}
+
+// gtFunc is an `>` operator.
+func gtFunc(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, ">", m, n)
+}
+
+// geFunc is an `>=` operator.
+func geFunc(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, ">=", m, n)
+}
+
+// ltFunc is an `<` operator.
+func ltFunc(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, "<", m, n)
+}
+
+// leFunc is an `<=` operator.
+func leFunc(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, "<=", m, n)
+}
+
+// neFunc is an `!=` operator.
+func neFunc(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, "!=", m, n)
+}
+
+// orFunc is an `or` operator.
+var orFunc = func(t iterator, m, n interface{}) interface{} {
+ t1 := getValueType(m)
+ t2 := getValueType(n)
+ return logicalFuncs[t1][t2](t, "or", m, n)
+}
+
+func numericExpr(m, n interface{}, cb func(float64, float64) float64) float64 {
+ typ := reflect.TypeOf(float64(0))
+ a := reflect.ValueOf(m).Convert(typ)
+ b := reflect.ValueOf(n).Convert(typ)
+ return cb(a.Float(), b.Float())
+}
+
+// plusFunc is an `+` operator.
+var plusFunc = func(m, n interface{}) interface{} {
+ return numericExpr(m, n, func(a, b float64) float64 {
+ return a + b
+ })
+}
+
+// minusFunc is an `-` operator.
+var minusFunc = func(m, n interface{}) interface{} {
+ return numericExpr(m, n, func(a, b float64) float64 {
+ return a - b
+ })
+}
+
+// mulFunc is an `*` operator.
+var mulFunc = func(m, n interface{}) interface{} {
+ return numericExpr(m, n, func(a, b float64) float64 {
+ return a * b
+ })
+}
+
+// divFunc is an `DIV` operator.
+var divFunc = func(m, n interface{}) interface{} {
+ return numericExpr(m, n, func(a, b float64) float64 {
+ return a / b
+ })
+}
+
+// modFunc is an 'MOD' operator.
+var modFunc = func(m, n interface{}) interface{} {
+ return numericExpr(m, n, func(a, b float64) float64 {
+ return float64(int(a) % int(b))
+ })
+}
diff --git a/vendor/github.com/antchfx/xpath/parse.go b/vendor/github.com/antchfx/xpath/parse.go
new file mode 100644
index 0000000..fb9abe3
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/parse.go
@@ -0,0 +1,1186 @@
+package xpath
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "unicode"
+)
+
// itemType identifies the kind of lexical token produced by the XPath
// expression scanner.
type itemType int

const (
	itemComma      itemType = iota // ','
	itemSlash                      // '/'
	itemAt                         // '@'
	itemDot                        // '.'
	itemLParens                    // '('
	itemRParens                    // ')'
	itemLBracket                   // '['
	itemRBracket                   // ']'
	itemStar                       // '*'
	itemPlus                       // '+'
	itemMinus                      // '-'
	itemEq                         // '='
	itemLt                         // '<'
	itemGt                         // '>'
	itemBang                       // '!'
	itemDollar                     // '$'
	itemApos                       // '\''
	itemQuote                      // '"'
	itemUnion                      // '|'
	itemNe                         // '!='
	itemLe                         // '<='
	itemGe                         // '>='
	itemAnd                        // '&&'
	itemOr                         // '||'
	itemDotDot                     // '..'
	itemSlashSlash                 // '//'
	itemName                       // XML Name
	itemString                     // Quoted string constant
	itemNumber                     // Number constant
	itemAxe                        // Axe (like child::)
	itemEOF                        // END
)

// A node is an XPath node in the parse tree.
type node interface {
	Type() nodeType
}

// nodeType identifies the type of a parse tree node.
type nodeType int

// Type returns the node kind. Embedding a nodeType value in a struct gives
// that struct a ready-made implementation of the node interface.
func (t nodeType) Type() nodeType {
	return t
}

const (
	nodeRoot nodeType = iota
	nodeAxis
	nodeFilter
	nodeFunction
	nodeOperator
	nodeVariable
	nodeConstantOperand
)

// parser builds a parse tree from the token stream produced by its scanner.
type parser struct {
	r *scanner // token source
	d int      // current recursion depth, capped in parseExpression
}

// newOperatorNode returns a new binary operator node operatorNode.
func newOperatorNode(op string, left, right node) node {
	return &operatorNode{nodeType: nodeOperator, Op: op, Left: left, Right: right}
}

// newOperandNode returns a new constant operand node operandNode.
func newOperandNode(v interface{}) node {
	return &operandNode{nodeType: nodeConstantOperand, Val: v}
}

// newAxisNode returns a new axis (location step) node axisNode.
func newAxisNode(axeTyp, localName, prefix, prop string, n node) node {
	return &axisNode{
		nodeType:  nodeAxis,
		LocalName: localName,
		Prefix:    prefix,
		AxeType:   axeTyp,
		Prop:      prop,
		Input:     n,
	}
}

// newVariableNode returns a new variable reference node variableNode.
func newVariableNode(prefix, name string) node {
	return &variableNode{nodeType: nodeVariable, Name: name, Prefix: prefix}
}

// newFilterNode returns a new filter node filterNode wrapping input n with
// predicate condition m.
func newFilterNode(n, m node) node {
	return &filterNode{nodeType: nodeFilter, Input: n, Condition: m}
}

// newRootNode returns a root node for the given slash form ("/" or "//").
func newRootNode(s string) node {
	return &rootNode{nodeType: nodeRoot, slash: s}
}

// newFunctionNode returns a function call node.
func newFunctionNode(name, prefix string, args []node) node {
	return &functionNode{nodeType: nodeFunction, Prefix: prefix, FuncName: name, Args: args}
}
+
+// testOp reports whether current item name is an operand op.
+func testOp(r *scanner, op string) bool {
+ return r.typ == itemName && r.prefix == "" && r.name == op
+}
+
+func isPrimaryExpr(r *scanner) bool {
+ switch r.typ {
+ case itemString, itemNumber, itemDollar, itemLParens:
+ return true
+ case itemName:
+ return r.canBeFunc && !isNodeType(r)
+ }
+ return false
+}
+
+func isNodeType(r *scanner) bool {
+ switch r.name {
+ case "node", "text", "processing-instruction", "comment":
+ return r.prefix == ""
+ }
+ return false
+}
+
+func isStep(item itemType) bool {
+ switch item {
+ case itemDot, itemDotDot, itemAt, itemAxe, itemStar, itemName:
+ return true
+ }
+ return false
+}
+
+func checkItem(r *scanner, typ itemType) {
+ if r.typ != typ {
+ panic(fmt.Sprintf("%s has an invalid token", r.text))
+ }
+}
+
+// parseExpression parsing the expression with input node n.
+func (p *parser) parseExpression(n node) node {
+ if p.d = p.d + 1; p.d > 200 {
+ panic("the xpath query is too complex(depth > 200)")
+ }
+ n = p.parseOrExpr(n)
+ p.d--
+ return n
+}
+
+// next scanning next item on forward.
+func (p *parser) next() bool {
+ return p.r.nextItem()
+}
+
+func (p *parser) skipItem(typ itemType) {
+ checkItem(p.r, typ)
+ p.next()
+}
+
+// OrExpr ::= AndExpr | OrExpr 'or' AndExpr
+func (p *parser) parseOrExpr(n node) node {
+ opnd := p.parseAndExpr(n)
+ for {
+ if !testOp(p.r, "or") {
+ break
+ }
+ p.next()
+ opnd = newOperatorNode("or", opnd, p.parseAndExpr(n))
+ }
+ return opnd
+}
+
+// AndExpr ::= EqualityExpr | AndExpr 'and' EqualityExpr
+func (p *parser) parseAndExpr(n node) node {
+ opnd := p.parseEqualityExpr(n)
+ for {
+ if !testOp(p.r, "and") {
+ break
+ }
+ p.next()
+ opnd = newOperatorNode("and", opnd, p.parseEqualityExpr(n))
+ }
+ return opnd
+}
+
+// EqualityExpr ::= RelationalExpr | EqualityExpr '=' RelationalExpr | EqualityExpr '!=' RelationalExpr
+func (p *parser) parseEqualityExpr(n node) node {
+ opnd := p.parseRelationalExpr(n)
+Loop:
+ for {
+ var op string
+ switch p.r.typ {
+ case itemEq:
+ op = "="
+ case itemNe:
+ op = "!="
+ default:
+ break Loop
+ }
+ p.next()
+ opnd = newOperatorNode(op, opnd, p.parseRelationalExpr(n))
+ }
+ return opnd
+}
+
+// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr
+// | RelationalExpr '<=' AdditiveExpr
+// | RelationalExpr '>=' AdditiveExpr
+func (p *parser) parseRelationalExpr(n node) node {
+ opnd := p.parseAdditiveExpr(n)
+Loop:
+ for {
+ var op string
+ switch p.r.typ {
+ case itemLt:
+ op = "<"
+ case itemGt:
+ op = ">"
+ case itemLe:
+ op = "<="
+ case itemGe:
+ op = ">="
+ default:
+ break Loop
+ }
+ p.next()
+ opnd = newOperatorNode(op, opnd, p.parseAdditiveExpr(n))
+ }
+ return opnd
+}
+
+// AdditiveExpr ::= MultiplicativeExpr | AdditiveExpr '+' MultiplicativeExpr | AdditiveExpr '-' MultiplicativeExpr
+func (p *parser) parseAdditiveExpr(n node) node {
+ opnd := p.parseMultiplicativeExpr(n)
+Loop:
+ for {
+ var op string
+ switch p.r.typ {
+ case itemPlus:
+ op = "+"
+ case itemMinus:
+ op = "-"
+ default:
+ break Loop
+ }
+ p.next()
+ opnd = newOperatorNode(op, opnd, p.parseMultiplicativeExpr(n))
+ }
+ return opnd
+}
+
+// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr
+// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr
+func (p *parser) parseMultiplicativeExpr(n node) node {
+ opnd := p.parseUnaryExpr(n)
+Loop:
+ for {
+ var op string
+ if p.r.typ == itemStar {
+ op = "*"
+ } else if testOp(p.r, "div") || testOp(p.r, "mod") {
+ op = p.r.name
+ } else {
+ break Loop
+ }
+ p.next()
+ opnd = newOperatorNode(op, opnd, p.parseUnaryExpr(n))
+ }
+ return opnd
+}
+
+// UnaryExpr ::= UnionExpr | '-' UnaryExpr
+func (p *parser) parseUnaryExpr(n node) node {
+ minus := false
+ // ignore '-' sequence
+ for p.r.typ == itemMinus {
+ p.next()
+ minus = !minus
+ }
+ opnd := p.parseUnionExpr(n)
+ if minus {
+ opnd = newOperatorNode("*", opnd, newOperandNode(float64(-1)))
+ }
+ return opnd
+}
+
+// UnionExpr ::= PathExpr | UnionExpr '|' PathExpr
+func (p *parser) parseUnionExpr(n node) node {
+ opnd := p.parsePathExpr(n)
+Loop:
+ for {
+ if p.r.typ != itemUnion {
+ break Loop
+ }
+ p.next()
+ opnd2 := p.parsePathExpr(n)
+ // Checking the node type that must be is node set type?
+ opnd = newOperatorNode("|", opnd, opnd2)
+ }
+ return opnd
+}
+
+// PathExpr ::= LocationPath | FilterExpr | FilterExpr '/' RelativeLocationPath | FilterExpr '//' RelativeLocationPath
+func (p *parser) parsePathExpr(n node) node {
+ var opnd node
+ if isPrimaryExpr(p.r) {
+ opnd = p.parseFilterExpr(n)
+ switch p.r.typ {
+ case itemSlash:
+ p.next()
+ opnd = p.parseRelativeLocationPath(opnd)
+ case itemSlashSlash:
+ p.next()
+ opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd))
+ }
+ } else {
+ opnd = p.parseLocationPath(nil)
+ }
+ return opnd
+}
+
// FilterExpr ::= PrimaryExpr | FilterExpr Predicate
//
// NOTE(review): only a single predicate is consumed here, while the XPath
// grammar allows a chain of predicates after a primary expression
// (expr[a][b]). Whether chained predicates are handled elsewhere is not
// visible from this file — TODO confirm.
func (p *parser) parseFilterExpr(n node) node {
	opnd := p.parsePrimaryExpr(n)
	if p.r.typ == itemLBracket {
		opnd = newFilterNode(opnd, p.parsePredicate(opnd))
	}
	return opnd
}

// Predicate ::= '[' PredicateExpr ']'
func (p *parser) parsePredicate(n node) node {
	p.skipItem(itemLBracket)
	opnd := p.parseExpression(n)
	p.skipItem(itemRBracket)
	return opnd
}
+
+// LocationPath ::= RelativeLocationPath | AbsoluteLocationPath
+func (p *parser) parseLocationPath(n node) (opnd node) {
+ switch p.r.typ {
+ case itemSlash:
+ p.next()
+ opnd = newRootNode("/")
+ if isStep(p.r.typ) {
+ opnd = p.parseRelativeLocationPath(opnd) // ?? child:: or self ??
+ }
+ case itemSlashSlash:
+ p.next()
+ opnd = newRootNode("//")
+ opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd))
+ default:
+ opnd = p.parseRelativeLocationPath(n)
+ }
+ return opnd
+}
+
+// RelativeLocationPath ::= Step | RelativeLocationPath '/' Step | AbbreviatedRelativeLocationPath
+func (p *parser) parseRelativeLocationPath(n node) node {
+ opnd := n
+Loop:
+ for {
+ opnd = p.parseStep(opnd)
+ switch p.r.typ {
+ case itemSlashSlash:
+ p.next()
+ opnd = newAxisNode("descendant-or-self", "", "", "", opnd)
+ case itemSlash:
+ p.next()
+ default:
+ break Loop
+ }
+ }
+ return opnd
+}
+
// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep
func (p *parser) parseStep(n node) (opnd node) {
	axeTyp := "child" // default axes value.
	if p.r.typ == itemDot || p.r.typ == itemDotDot {
		// Abbreviated steps: '.' is self::node(), '..' is parent::node().
		if p.r.typ == itemDot {
			axeTyp = "self"
		} else {
			axeTyp = "parent"
		}
		p.next()
		opnd = newAxisNode(axeTyp, "", "", "", n)
		if p.r.typ != itemLBracket {
			return opnd
		}
	} else {
		switch p.r.typ {
		case itemAt:
			// '@name' abbreviates attribute::name.
			p.next()
			axeTyp = "attribute"
		case itemAxe:
			// Explicit axis specifier, e.g. 'ancestor::'.
			axeTyp = p.r.name
			p.next()
		case itemLParens:
			// Parenthesized step sequence: '(a, b)'.
			return p.parseSequence(n)
		}
		opnd = p.parseNodeTest(n, axeTyp)
	}
	// Zero or more predicates wrap the step in filter nodes.
	for p.r.typ == itemLBracket {
		opnd = newFilterNode(opnd, p.parsePredicate(opnd))
	}
	return opnd
}
+
+// Expr ::= '(' Step ("," Step)* ')'
+func (p *parser) parseSequence(n node) (opnd node) {
+ p.skipItem(itemLParens)
+ opnd = p.parseStep(n)
+ for {
+ if p.r.typ != itemComma {
+ break
+ }
+ p.next()
+ opnd2 := p.parseStep(n)
+ opnd = newOperatorNode("|", opnd, opnd2)
+ }
+ p.skipItem(itemRParens)
+ return opnd
+}
+
+// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')'
+func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) {
+ switch p.r.typ {
+ case itemName:
+ if p.r.canBeFunc && isNodeType(p.r) {
+ var prop string
+ switch p.r.name {
+ case "comment", "text", "processing-instruction", "node":
+ prop = p.r.name
+ }
+ var name string
+ p.next()
+ p.skipItem(itemLParens)
+ if prop == "processing-instruction" && p.r.typ != itemRParens {
+ checkItem(p.r, itemString)
+ name = p.r.strval
+ p.next()
+ }
+ p.skipItem(itemRParens)
+ opnd = newAxisNode(axeTyp, name, "", prop, n)
+ } else {
+ prefix := p.r.prefix
+ name := p.r.name
+ p.next()
+ if p.r.name == "*" {
+ name = ""
+ }
+ opnd = newAxisNode(axeTyp, name, prefix, "", n)
+ }
+ case itemStar:
+ opnd = newAxisNode(axeTyp, "", "", "", n)
+ p.next()
+ default:
+ panic("expression must evaluate to a node-set")
+ }
+ return opnd
+}
+
// PrimaryExpr ::= VariableReference | '(' Expr ')' | Literal | Number | FunctionCall
//
// NOTE(review): if the current token matches none of the cases (e.g. a name
// that cannot be a function), opnd is returned as nil; callers presumably
// guard with isPrimaryExpr first — confirm before calling directly.
func (p *parser) parsePrimaryExpr(n node) (opnd node) {
	switch p.r.typ {
	case itemString:
		// String literal.
		opnd = newOperandNode(p.r.strval)
		p.next()
	case itemNumber:
		// Numeric literal.
		opnd = newOperandNode(p.r.numval)
		p.next()
	case itemDollar:
		// Variable reference: $name or $prefix:name.
		p.next()
		checkItem(p.r, itemName)
		opnd = newVariableNode(p.r.prefix, p.r.name)
		p.next()
	case itemLParens:
		// Parenthesized sub-expression.
		p.next()
		opnd = p.parseExpression(n)
		p.skipItem(itemRParens)
	case itemName:
		if p.r.canBeFunc && !isNodeType(p.r) {
			// Function call.
			opnd = p.parseMethod(nil)
		}
	}
	return opnd
}
+
+// FunctionCall ::= FunctionName '(' ( Argument ( ',' Argument )* )? ')'
+func (p *parser) parseMethod(n node) node {
+ var args []node
+ name := p.r.name
+ prefix := p.r.prefix
+
+ p.skipItem(itemName)
+ p.skipItem(itemLParens)
+ if p.r.typ != itemRParens {
+ for {
+ args = append(args, p.parseExpression(n))
+ if p.r.typ == itemRParens {
+ break
+ }
+ p.skipItem(itemComma)
+ }
+ }
+ p.skipItem(itemRParens)
+ return newFunctionNode(name, prefix, args)
+}
+
// parse parses the XPath expression string expr and returns the root of its
// parse tree. Lexical or syntax errors cause a panic from the scanner or
// parser helpers.
func parse(expr string) node {
	r := &scanner{text: expr}
	r.nextChar()
	r.nextItem()
	p := &parser{r: r}
	return p.parseExpression(nil)
}
+
// rootNode holds a top-level node of tree.
type rootNode struct {
	nodeType
	slash string // "/" or "//", as written in the expression
}

// String returns the slash form used for this root.
func (r *rootNode) String() string {
	return r.slash
}

// operatorNode holds a binary operator and its two operand subtrees.
type operatorNode struct {
	nodeType
	Op          string
	Left, Right node
}

// String returns a debug rendering of the operator expression.
func (o *operatorNode) String() string {
	return fmt.Sprintf("%v%s%v", o.Left, o.Op, o.Right)
}

// axisNode holds a location step.
type axisNode struct {
	nodeType
	Input     node
	Prop      string // node-test name.[comment|text|processing-instruction|node]
	AxeType   string // name of the axes.[attribute|ancestor|child|....]
	LocalName string // local part name of node.
	Prefix    string // prefix name of node.
}

// String returns a debug rendering of the step, e.g. "child::ns:name".
func (a *axisNode) String() string {
	var b bytes.Buffer
	if a.AxeType != "" {
		b.Write([]byte(a.AxeType + "::"))
	}
	if a.Prefix != "" {
		b.Write([]byte(a.Prefix + ":"))
	}
	b.Write([]byte(a.LocalName))
	if a.Prop != "" {
		b.Write([]byte("/" + a.Prop + "()"))
	}
	return b.String()
}

// operandNode holds a constant operand.
type operandNode struct {
	nodeType
	Val interface{}
}

// String returns the operand's value rendered with %v.
func (o *operandNode) String() string {
	return fmt.Sprintf("%v", o.Val)
}

// filterNode holds a condition filter.
type filterNode struct {
	nodeType
	Input, Condition node
}

// String returns a debug rendering of the filter, e.g. "input[cond]".
func (f *filterNode) String() string {
	return fmt.Sprintf("%s[%s]", f.Input, f.Condition)
}

// variableNode holds a variable reference.
type variableNode struct {
	nodeType
	Name, Prefix string
}

// String returns the variable name, prefixed when a prefix is present.
func (v *variableNode) String() string {
	if v.Prefix == "" {
		return v.Name
	}
	return fmt.Sprintf("%s:%s", v.Prefix, v.Name)
}

// functionNode holds a function call.
type functionNode struct {
	nodeType
	Args     []node
	Prefix   string
	FuncName string // function name
}

// String returns a debug rendering of the call: fun(arg1,...,argn).
func (f *functionNode) String() string {
	var b bytes.Buffer
	// fun(arg1, ..., argn)
	b.Write([]byte(f.FuncName))
	b.Write([]byte("("))
	for i, arg := range f.Args {
		if i > 0 {
			b.Write([]byte(","))
		}
		b.Write([]byte(fmt.Sprintf("%s", arg)))
	}
	b.Write([]byte(")"))
	return b.String()
}
+
// scanner tokenizes an XPath expression string, one item at a time.
type scanner struct {
	text, name, prefix string

	pos       int
	curr      rune
	typ       itemType
	strval    string  // text value at current pos
	numval    float64 // number value at current pos
	canBeFunc bool
}

// nextChar advances to the next input character, returning false at end of
// input (in which case curr becomes the zero rune, used as a sentinel).
// NOTE(review): the input is indexed byte-by-byte, so multi-byte UTF-8 runes
// are observed as individual bytes — confirm whether non-ASCII expressions
// are expected here.
func (s *scanner) nextChar() bool {
	if s.pos >= len(s.text) {
		s.curr = rune(0)
		return false
	}
	s.curr = rune(s.text[s.pos])
	s.pos++
	return true
}

// nextItem scans the next token, storing its type (and value, for names,
// strings and numbers) on the scanner. It returns false at end of input.
func (s *scanner) nextItem() bool {
	s.skipSpace()
	switch s.curr {
	case 0:
		// Zero rune: nextChar's end-of-input sentinel.
		s.typ = itemEOF
		return false
	case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$':
		s.typ = asItemType(s.curr)
		s.nextChar()
	case '<':
		// '<' or '<='.
		s.typ = itemLt
		s.nextChar()
		if s.curr == '=' {
			s.typ = itemLe
			s.nextChar()
		}
	case '>':
		// '>' or '>='.
		s.typ = itemGt
		s.nextChar()
		if s.curr == '=' {
			s.typ = itemGe
			s.nextChar()
		}
	case '!':
		// '!' or '!='.
		s.typ = itemBang
		s.nextChar()
		if s.curr == '=' {
			s.typ = itemNe
			s.nextChar()
		}
	case '.':
		// '.', '..', or a fraction like '.5'.
		s.typ = itemDot
		s.nextChar()
		if s.curr == '.' {
			s.typ = itemDotDot
			s.nextChar()
		} else if isDigit(s.curr) {
			s.typ = itemNumber
			s.numval = s.scanFraction()
		}
	case '/':
		// '/' or '//'.
		s.typ = itemSlash
		s.nextChar()
		if s.curr == '/' {
			s.typ = itemSlashSlash
			s.nextChar()
		}
	case '"', '\'':
		s.typ = itemString
		s.strval = s.scanString()
	default:
		if isDigit(s.curr) {
			s.typ = itemNumber
			s.numval = s.scanNumber()
		} else if isName(s.curr) {
			s.typ = itemName
			s.name = s.scanName()
			s.prefix = ""
			// "foo:bar" is one item, not three, because it doesn't allow spaces in between.
			// We should distinguish it from "foo::" and need to process "foo ::" as well.
			if s.curr == ':' {
				s.nextChar()
				// can be "foo:bar" or "foo::"
				if s.curr == ':' {
					// "foo::"
					s.nextChar()
					s.typ = itemAxe
				} else { // "foo:*", "foo:bar" or "foo: "
					s.prefix = s.name
					if s.curr == '*' {
						s.nextChar()
						s.name = "*"
					} else if isName(s.curr) {
						s.name = s.scanName()
					} else {
						panic(fmt.Sprintf("%s has an invalid qualified name.", s.text))
					}
				}
			} else {
				s.skipSpace()
				if s.curr == ':' {
					s.nextChar()
					// it can be "foo ::" or just "foo :"
					if s.curr == ':' {
						s.nextChar()
						s.typ = itemAxe
					} else {
						panic(fmt.Sprintf("%s has an invalid qualified name.", s.text))
					}
				}
			}
			s.skipSpace()
			// A name directly followed by '(' may be a function call.
			s.canBeFunc = s.curr == '('
		} else {
			panic(fmt.Sprintf("%s has an invalid token.", s.text))
		}
	}
	return true
}
+
+func (s *scanner) skipSpace() {
+Loop:
+ for {
+ if !unicode.IsSpace(s.curr) || !s.nextChar() {
+ break Loop
+ }
+ }
+}
+
// scanFraction scans a number that started with '.'; the '.' and the first
// digit have already been consumed by the caller. Returns the parsed value.
func (s *scanner) scanFraction() float64 {
	var (
		// s.pos is one past the lookahead char, so the '.' sits two back.
		i = s.pos - 2
		c = 1 // '.'
	)
	for isDigit(s.curr) {
		s.nextChar()
		c++
	}
	v, err := strconv.ParseFloat(s.text[i:i+c], 64)
	if err != nil {
		panic(fmt.Errorf("xpath: scanFraction parse float got error: %v", err))
	}
	return v
}

// scanNumber scans an integer or decimal literal starting at the current
// character and returns its value.
func (s *scanner) scanNumber() float64 {
	var (
		c int
		i = s.pos - 1 // index of the first digit
	)
	for isDigit(s.curr) {
		s.nextChar()
		c++
	}
	if s.curr == '.' {
		// Optional fractional part.
		s.nextChar()
		c++
		for isDigit(s.curr) {
			s.nextChar()
			c++
		}
	}
	v, err := strconv.ParseFloat(s.text[i:i+c], 64)
	if err != nil {
		panic(fmt.Errorf("xpath: scanNumber parse float got error: %v", err))
	}
	return v
}

// scanString scans a quoted string literal delimited by the current quote
// character and returns its unquoted contents. Panics if the input ends
// before the closing quote.
func (s *scanner) scanString() string {
	var (
		c   = 0
		end = s.curr // opening quote doubles as the terminator
	)
	s.nextChar()
	i := s.pos - 1
	for s.curr != end {
		if !s.nextChar() {
			panic(errors.New("xpath: scanString got unclosed string"))
		}
		c++
	}
	s.nextChar()
	return s.text[i : i+c]
}

// scanName scans a run of name characters starting at the current character.
func (s *scanner) scanName() string {
	var (
		c int
		i = s.pos - 1
	)
	for isName(s.curr) {
		c++
		if !s.nextChar() {
			break
		}
	}
	return s.text[i : i+c]
}
+
+func isName(r rune) bool {
+ return string(r) != ":" && string(r) != "/" &&
+ (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*")
+}
+
+func isDigit(r rune) bool {
+ return unicode.IsDigit(r)
+}
+
// asItemType maps a single-character punctuation token to its itemType.
// Panics on any rune outside the fixed set (callers pass only characters
// listed in nextItem's punctuation case).
func asItemType(r rune) itemType {
	switch r {
	case ',':
		return itemComma
	case '@':
		return itemAt
	case '(':
		return itemLParens
	case ')':
		return itemRParens
	case '|':
		return itemUnion
	case '*':
		return itemStar
	case '[':
		return itemLBracket
	case ']':
		return itemRBracket
	case '+':
		return itemPlus
	case '-':
		return itemMinus
	case '=':
		return itemEq
	case '$':
		return itemDollar
	}
	panic(fmt.Errorf("unknown item: %v", r))
}
+
+var first = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x003A, 0x003A, 1},
+ {0x0041, 0x005A, 1},
+ {0x005F, 0x005F, 1},
+ {0x0061, 0x007A, 1},
+ {0x00C0, 0x00D6, 1},
+ {0x00D8, 0x00F6, 1},
+ {0x00F8, 0x00FF, 1},
+ {0x0100, 0x0131, 1},
+ {0x0134, 0x013E, 1},
+ {0x0141, 0x0148, 1},
+ {0x014A, 0x017E, 1},
+ {0x0180, 0x01C3, 1},
+ {0x01CD, 0x01F0, 1},
+ {0x01F4, 0x01F5, 1},
+ {0x01FA, 0x0217, 1},
+ {0x0250, 0x02A8, 1},
+ {0x02BB, 0x02C1, 1},
+ {0x0386, 0x0386, 1},
+ {0x0388, 0x038A, 1},
+ {0x038C, 0x038C, 1},
+ {0x038E, 0x03A1, 1},
+ {0x03A3, 0x03CE, 1},
+ {0x03D0, 0x03D6, 1},
+ {0x03DA, 0x03E0, 2},
+ {0x03E2, 0x03F3, 1},
+ {0x0401, 0x040C, 1},
+ {0x040E, 0x044F, 1},
+ {0x0451, 0x045C, 1},
+ {0x045E, 0x0481, 1},
+ {0x0490, 0x04C4, 1},
+ {0x04C7, 0x04C8, 1},
+ {0x04CB, 0x04CC, 1},
+ {0x04D0, 0x04EB, 1},
+ {0x04EE, 0x04F5, 1},
+ {0x04F8, 0x04F9, 1},
+ {0x0531, 0x0556, 1},
+ {0x0559, 0x0559, 1},
+ {0x0561, 0x0586, 1},
+ {0x05D0, 0x05EA, 1},
+ {0x05F0, 0x05F2, 1},
+ {0x0621, 0x063A, 1},
+ {0x0641, 0x064A, 1},
+ {0x0671, 0x06B7, 1},
+ {0x06BA, 0x06BE, 1},
+ {0x06C0, 0x06CE, 1},
+ {0x06D0, 0x06D3, 1},
+ {0x06D5, 0x06D5, 1},
+ {0x06E5, 0x06E6, 1},
+ {0x0905, 0x0939, 1},
+ {0x093D, 0x093D, 1},
+ {0x0958, 0x0961, 1},
+ {0x0985, 0x098C, 1},
+ {0x098F, 0x0990, 1},
+ {0x0993, 0x09A8, 1},
+ {0x09AA, 0x09B0, 1},
+ {0x09B2, 0x09B2, 1},
+ {0x09B6, 0x09B9, 1},
+ {0x09DC, 0x09DD, 1},
+ {0x09DF, 0x09E1, 1},
+ {0x09F0, 0x09F1, 1},
+ {0x0A05, 0x0A0A, 1},
+ {0x0A0F, 0x0A10, 1},
+ {0x0A13, 0x0A28, 1},
+ {0x0A2A, 0x0A30, 1},
+ {0x0A32, 0x0A33, 1},
+ {0x0A35, 0x0A36, 1},
+ {0x0A38, 0x0A39, 1},
+ {0x0A59, 0x0A5C, 1},
+ {0x0A5E, 0x0A5E, 1},
+ {0x0A72, 0x0A74, 1},
+ {0x0A85, 0x0A8B, 1},
+ {0x0A8D, 0x0A8D, 1},
+ {0x0A8F, 0x0A91, 1},
+ {0x0A93, 0x0AA8, 1},
+ {0x0AAA, 0x0AB0, 1},
+ {0x0AB2, 0x0AB3, 1},
+ {0x0AB5, 0x0AB9, 1},
+ {0x0ABD, 0x0AE0, 0x23},
+ {0x0B05, 0x0B0C, 1},
+ {0x0B0F, 0x0B10, 1},
+ {0x0B13, 0x0B28, 1},
+ {0x0B2A, 0x0B30, 1},
+ {0x0B32, 0x0B33, 1},
+ {0x0B36, 0x0B39, 1},
+ {0x0B3D, 0x0B3D, 1},
+ {0x0B5C, 0x0B5D, 1},
+ {0x0B5F, 0x0B61, 1},
+ {0x0B85, 0x0B8A, 1},
+ {0x0B8E, 0x0B90, 1},
+ {0x0B92, 0x0B95, 1},
+ {0x0B99, 0x0B9A, 1},
+ {0x0B9C, 0x0B9C, 1},
+ {0x0B9E, 0x0B9F, 1},
+ {0x0BA3, 0x0BA4, 1},
+ {0x0BA8, 0x0BAA, 1},
+ {0x0BAE, 0x0BB5, 1},
+ {0x0BB7, 0x0BB9, 1},
+ {0x0C05, 0x0C0C, 1},
+ {0x0C0E, 0x0C10, 1},
+ {0x0C12, 0x0C28, 1},
+ {0x0C2A, 0x0C33, 1},
+ {0x0C35, 0x0C39, 1},
+ {0x0C60, 0x0C61, 1},
+ {0x0C85, 0x0C8C, 1},
+ {0x0C8E, 0x0C90, 1},
+ {0x0C92, 0x0CA8, 1},
+ {0x0CAA, 0x0CB3, 1},
+ {0x0CB5, 0x0CB9, 1},
+ {0x0CDE, 0x0CDE, 1},
+ {0x0CE0, 0x0CE1, 1},
+ {0x0D05, 0x0D0C, 1},
+ {0x0D0E, 0x0D10, 1},
+ {0x0D12, 0x0D28, 1},
+ {0x0D2A, 0x0D39, 1},
+ {0x0D60, 0x0D61, 1},
+ {0x0E01, 0x0E2E, 1},
+ {0x0E30, 0x0E30, 1},
+ {0x0E32, 0x0E33, 1},
+ {0x0E40, 0x0E45, 1},
+ {0x0E81, 0x0E82, 1},
+ {0x0E84, 0x0E84, 1},
+ {0x0E87, 0x0E88, 1},
+ {0x0E8A, 0x0E8D, 3},
+ {0x0E94, 0x0E97, 1},
+ {0x0E99, 0x0E9F, 1},
+ {0x0EA1, 0x0EA3, 1},
+ {0x0EA5, 0x0EA7, 2},
+ {0x0EAA, 0x0EAB, 1},
+ {0x0EAD, 0x0EAE, 1},
+ {0x0EB0, 0x0EB0, 1},
+ {0x0EB2, 0x0EB3, 1},
+ {0x0EBD, 0x0EBD, 1},
+ {0x0EC0, 0x0EC4, 1},
+ {0x0F40, 0x0F47, 1},
+ {0x0F49, 0x0F69, 1},
+ {0x10A0, 0x10C5, 1},
+ {0x10D0, 0x10F6, 1},
+ {0x1100, 0x1100, 1},
+ {0x1102, 0x1103, 1},
+ {0x1105, 0x1107, 1},
+ {0x1109, 0x1109, 1},
+ {0x110B, 0x110C, 1},
+ {0x110E, 0x1112, 1},
+ {0x113C, 0x1140, 2},
+ {0x114C, 0x1150, 2},
+ {0x1154, 0x1155, 1},
+ {0x1159, 0x1159, 1},
+ {0x115F, 0x1161, 1},
+ {0x1163, 0x1169, 2},
+ {0x116D, 0x116E, 1},
+ {0x1172, 0x1173, 1},
+ {0x1175, 0x119E, 0x119E - 0x1175},
+ {0x11A8, 0x11AB, 0x11AB - 0x11A8},
+ {0x11AE, 0x11AF, 1},
+ {0x11B7, 0x11B8, 1},
+ {0x11BA, 0x11BA, 1},
+ {0x11BC, 0x11C2, 1},
+ {0x11EB, 0x11F0, 0x11F0 - 0x11EB},
+ {0x11F9, 0x11F9, 1},
+ {0x1E00, 0x1E9B, 1},
+ {0x1EA0, 0x1EF9, 1},
+ {0x1F00, 0x1F15, 1},
+ {0x1F18, 0x1F1D, 1},
+ {0x1F20, 0x1F45, 1},
+ {0x1F48, 0x1F4D, 1},
+ {0x1F50, 0x1F57, 1},
+ {0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
+ {0x1F5D, 0x1F5D, 1},
+ {0x1F5F, 0x1F7D, 1},
+ {0x1F80, 0x1FB4, 1},
+ {0x1FB6, 0x1FBC, 1},
+ {0x1FBE, 0x1FBE, 1},
+ {0x1FC2, 0x1FC4, 1},
+ {0x1FC6, 0x1FCC, 1},
+ {0x1FD0, 0x1FD3, 1},
+ {0x1FD6, 0x1FDB, 1},
+ {0x1FE0, 0x1FEC, 1},
+ {0x1FF2, 0x1FF4, 1},
+ {0x1FF6, 0x1FFC, 1},
+ {0x2126, 0x2126, 1},
+ {0x212A, 0x212B, 1},
+ {0x212E, 0x212E, 1},
+ {0x2180, 0x2182, 1},
+ {0x3007, 0x3007, 1},
+ {0x3021, 0x3029, 1},
+ {0x3041, 0x3094, 1},
+ {0x30A1, 0x30FA, 1},
+ {0x3105, 0x312C, 1},
+ {0x4E00, 0x9FA5, 1},
+ {0xAC00, 0xD7A3, 1},
+ },
+}
+
+var second = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x002D, 0x002E, 1},
+ {0x0030, 0x0039, 1},
+ {0x00B7, 0x00B7, 1},
+ {0x02D0, 0x02D1, 1},
+ {0x0300, 0x0345, 1},
+ {0x0360, 0x0361, 1},
+ {0x0387, 0x0387, 1},
+ {0x0483, 0x0486, 1},
+ {0x0591, 0x05A1, 1},
+ {0x05A3, 0x05B9, 1},
+ {0x05BB, 0x05BD, 1},
+ {0x05BF, 0x05BF, 1},
+ {0x05C1, 0x05C2, 1},
+ {0x05C4, 0x0640, 0x0640 - 0x05C4},
+ {0x064B, 0x0652, 1},
+ {0x0660, 0x0669, 1},
+ {0x0670, 0x0670, 1},
+ {0x06D6, 0x06DC, 1},
+ {0x06DD, 0x06DF, 1},
+ {0x06E0, 0x06E4, 1},
+ {0x06E7, 0x06E8, 1},
+ {0x06EA, 0x06ED, 1},
+ {0x06F0, 0x06F9, 1},
+ {0x0901, 0x0903, 1},
+ {0x093C, 0x093C, 1},
+ {0x093E, 0x094C, 1},
+ {0x094D, 0x094D, 1},
+ {0x0951, 0x0954, 1},
+ {0x0962, 0x0963, 1},
+ {0x0966, 0x096F, 1},
+ {0x0981, 0x0983, 1},
+ {0x09BC, 0x09BC, 1},
+ {0x09BE, 0x09BF, 1},
+ {0x09C0, 0x09C4, 1},
+ {0x09C7, 0x09C8, 1},
+ {0x09CB, 0x09CD, 1},
+ {0x09D7, 0x09D7, 1},
+ {0x09E2, 0x09E3, 1},
+ {0x09E6, 0x09EF, 1},
+ {0x0A02, 0x0A3C, 0x3A},
+ {0x0A3E, 0x0A3F, 1},
+ {0x0A40, 0x0A42, 1},
+ {0x0A47, 0x0A48, 1},
+ {0x0A4B, 0x0A4D, 1},
+ {0x0A66, 0x0A6F, 1},
+ {0x0A70, 0x0A71, 1},
+ {0x0A81, 0x0A83, 1},
+ {0x0ABC, 0x0ABC, 1},
+ {0x0ABE, 0x0AC5, 1},
+ {0x0AC7, 0x0AC9, 1},
+ {0x0ACB, 0x0ACD, 1},
+ {0x0AE6, 0x0AEF, 1},
+ {0x0B01, 0x0B03, 1},
+ {0x0B3C, 0x0B3C, 1},
+ {0x0B3E, 0x0B43, 1},
+ {0x0B47, 0x0B48, 1},
+ {0x0B4B, 0x0B4D, 1},
+ {0x0B56, 0x0B57, 1},
+ {0x0B66, 0x0B6F, 1},
+ {0x0B82, 0x0B83, 1},
+ {0x0BBE, 0x0BC2, 1},
+ {0x0BC6, 0x0BC8, 1},
+ {0x0BCA, 0x0BCD, 1},
+ {0x0BD7, 0x0BD7, 1},
+ {0x0BE7, 0x0BEF, 1},
+ {0x0C01, 0x0C03, 1},
+ {0x0C3E, 0x0C44, 1},
+ {0x0C46, 0x0C48, 1},
+ {0x0C4A, 0x0C4D, 1},
+ {0x0C55, 0x0C56, 1},
+ {0x0C66, 0x0C6F, 1},
+ {0x0C82, 0x0C83, 1},
+ {0x0CBE, 0x0CC4, 1},
+ {0x0CC6, 0x0CC8, 1},
+ {0x0CCA, 0x0CCD, 1},
+ {0x0CD5, 0x0CD6, 1},
+ {0x0CE6, 0x0CEF, 1},
+ {0x0D02, 0x0D03, 1},
+ {0x0D3E, 0x0D43, 1},
+ {0x0D46, 0x0D48, 1},
+ {0x0D4A, 0x0D4D, 1},
+ {0x0D57, 0x0D57, 1},
+ {0x0D66, 0x0D6F, 1},
+ {0x0E31, 0x0E31, 1},
+ {0x0E34, 0x0E3A, 1},
+ {0x0E46, 0x0E46, 1},
+ {0x0E47, 0x0E4E, 1},
+ {0x0E50, 0x0E59, 1},
+ {0x0EB1, 0x0EB1, 1},
+ {0x0EB4, 0x0EB9, 1},
+ {0x0EBB, 0x0EBC, 1},
+ {0x0EC6, 0x0EC6, 1},
+ {0x0EC8, 0x0ECD, 1},
+ {0x0ED0, 0x0ED9, 1},
+ {0x0F18, 0x0F19, 1},
+ {0x0F20, 0x0F29, 1},
+ {0x0F35, 0x0F39, 2},
+ {0x0F3E, 0x0F3F, 1},
+ {0x0F71, 0x0F84, 1},
+ {0x0F86, 0x0F8B, 1},
+ {0x0F90, 0x0F95, 1},
+ {0x0F97, 0x0F97, 1},
+ {0x0F99, 0x0FAD, 1},
+ {0x0FB1, 0x0FB7, 1},
+ {0x0FB9, 0x0FB9, 1},
+ {0x20D0, 0x20DC, 1},
+ {0x20E1, 0x3005, 0x3005 - 0x20E1},
+ {0x302A, 0x302F, 1},
+ {0x3031, 0x3035, 1},
+ {0x3099, 0x309A, 1},
+ {0x309D, 0x309E, 1},
+ {0x30FC, 0x30FE, 1},
+ },
+}
diff --git a/vendor/github.com/antchfx/xpath/query.go b/vendor/github.com/antchfx/xpath/query.go
new file mode 100644
index 0000000..333fe09
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/query.go
@@ -0,0 +1,824 @@
+package xpath
+
+import (
+ "bytes"
+ "fmt"
+ "hash/fnv"
+ "reflect"
+)
+
// iterator supplies the current context node during query execution.
type iterator interface {
	Current() NodeNavigator
}

// An XPath query interface.
type query interface {
	// Select traversing iterator returns a query matched node NodeNavigator.
	Select(iterator) NodeNavigator

	// Evaluate evaluates query and returns values of the current query.
	Evaluate(iterator) interface{}

	// Clone returns a copy of the query so it can be re-run independently.
	Clone() query
}
+
+// contextQuery is returns current node on the iterator object query.
+type contextQuery struct {
+ count int
+ Root bool // Moving to root-level node in the current context iterator.
+}
+
+func (c *contextQuery) Select(t iterator) (n NodeNavigator) {
+ if c.count == 0 {
+ c.count++
+ n = t.Current().Copy()
+ if c.Root {
+ n.MoveToRoot()
+ }
+ }
+ return n
+}
+
+func (c *contextQuery) Evaluate(iterator) interface{} {
+ c.count = 0
+ return c
+}
+
+func (c *contextQuery) Clone() query {
+ return &contextQuery{count: 0, Root: c.Root}
+}
+
// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*)
type ancestorQuery struct {
	iterator func() NodeNavigator // in-progress walk over the current input node's ancestors

	Self      bool // include the input node itself (ancestor-or-self)
	Input     query
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching ancestor (or self, when Self is set) of
// each node produced by the Input query, resuming a per-node closure between
// calls.
func (a *ancestorQuery) Select(t iterator) NodeNavigator {
	for {
		if a.iterator == nil {
			node := a.Input.Select(t)
			if node == nil {
				return nil
			}
			first := true
			a.iterator = func() NodeNavigator {
				if first && a.Self {
					first = false
					if a.Predicate(node) {
						return node
					}
				}
				// Walk upward until a matching ancestor is found.
				for node.MoveToParent() {
					if !a.Predicate(node) {
						continue
					}
					return node
				}
				return nil
			}
		}

		if node := a.iterator(); node != nil {
			return node
		}
		// Current input node exhausted; fetch the next one.
		a.iterator = nil
	}
}

// Evaluate re-evaluates the input query and resets the iteration state.
func (a *ancestorQuery) Evaluate(t iterator) interface{} {
	a.Input.Evaluate(t)
	a.iterator = nil
	return a
}

// Test reports whether n matches this query's predicate.
func (a *ancestorQuery) Test(n NodeNavigator) bool {
	return a.Predicate(n)
}

// Clone returns a copy with a fresh (nil) iterator.
func (a *ancestorQuery) Clone() query {
	return &ancestorQuery{Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate}
}

// attributeQuery is an XPath attribute node query.(@*)
type attributeQuery struct {
	iterator func() NodeNavigator // in-progress walk over the current node's attributes

	Input     query
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching attribute of each node produced by the
// Input query.
func (a *attributeQuery) Select(t iterator) NodeNavigator {
	for {
		if a.iterator == nil {
			node := a.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			a.iterator = func() NodeNavigator {
				for {
					onAttr := node.MoveToNextAttribute()
					if !onAttr {
						return nil
					}
					if a.Predicate(node) {
						return node
					}
				}
			}
		}

		if node := a.iterator(); node != nil {
			return node
		}
		a.iterator = nil
	}
}

// Evaluate re-evaluates the input query and resets the iteration state.
func (a *attributeQuery) Evaluate(t iterator) interface{} {
	a.Input.Evaluate(t)
	a.iterator = nil
	return a
}

// Test reports whether n matches this query's predicate.
func (a *attributeQuery) Test(n NodeNavigator) bool {
	return a.Predicate(n)
}

// Clone returns a copy with a fresh (nil) iterator.
func (a *attributeQuery) Clone() query {
	return &attributeQuery{Input: a.Input.Clone(), Predicate: a.Predicate}
}

// childQuery is an XPath child node query.(child::*)
type childQuery struct {
	posit    int // 1-based position of the last returned child
	iterator func() NodeNavigator

	Input     query
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching child of each node produced by the Input
// query, tracking the child's position for position() support.
func (c *childQuery) Select(t iterator) NodeNavigator {
	for {
		if c.iterator == nil {
			c.posit = 0
			node := c.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			first := true
			c.iterator = func() NodeNavigator {
				for {
					// First call descends to the first child; later calls
					// advance through the siblings.
					if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) {
						return nil
					}
					first = false
					if c.Predicate(node) {
						return node
					}
				}
			}
		}

		if node := c.iterator(); node != nil {
			c.posit++
			return node
		}
		c.iterator = nil
	}
}

// Evaluate re-evaluates the input query and resets the iteration state.
func (c *childQuery) Evaluate(t iterator) interface{} {
	c.Input.Evaluate(t)
	c.iterator = nil
	return c
}

// Test reports whether n matches this query's predicate.
func (c *childQuery) Test(n NodeNavigator) bool {
	return c.Predicate(n)
}

// Clone returns a copy with fresh iteration state (posit restarts at zero).
func (c *childQuery) Clone() query {
	return &childQuery{Input: c.Input.Clone(), Predicate: c.Predicate}
}

// position returns a position of current NodeNavigator.
func (c *childQuery) position() int {
	return c.posit
}

// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*)
type descendantQuery struct {
	iterator func() NodeNavigator
	posit    int // 1-based position of the last returned descendant

	Self      bool // include the input node itself (descendant-or-self)
	Input     query
	Predicate func(NodeNavigator) bool
}

// Select returns the next matching descendant of each node produced by the
// Input query, using an explicit depth counter to perform an in-place
// pre-order walk without recursion.
func (d *descendantQuery) Select(t iterator) NodeNavigator {
	for {
		if d.iterator == nil {
			d.posit = 0
			node := d.Input.Select(t)
			if node == nil {
				return nil
			}
			node = node.Copy()
			level := 0 // depth below the input node; 0 means back at the root of the walk
			first := true
			d.iterator = func() NodeNavigator {
				if first && d.Self {
					first = false
					if d.Predicate(node) {
						return node
					}
				}

				for {
					if node.MoveToChild() {
						level++
					} else {
						// No child: climb until a next sibling exists, or
						// the walk returns to the starting node (level 0).
						for {
							if level == 0 {
								return nil
							}
							if node.MoveToNext() {
								break
							}
							node.MoveToParent()
							level--
						}
					}
					if d.Predicate(node) {
						return node
					}
				}
			}
		}

		if node := d.iterator(); node != nil {
			d.posit++
			return node
		}
		d.iterator = nil
	}
}

// Evaluate re-evaluates the input query and resets the iteration state.
func (d *descendantQuery) Evaluate(t iterator) interface{} {
	d.Input.Evaluate(t)
	d.iterator = nil
	return d
}

// Test reports whether n matches this query's predicate.
func (d *descendantQuery) Test(n NodeNavigator) bool {
	return d.Predicate(n)
}

// position returns a position of current NodeNavigator.
func (d *descendantQuery) position() int {
	return d.posit
}

// Clone returns a copy with fresh iteration state.
func (d *descendantQuery) Clone() query {
	return &descendantQuery{Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate}
}
+
+// followingQuery is an XPath following node query.(following::*|following-sibling::*)
+type followingQuery struct {
+ iterator func() NodeNavigator
+
+ Input query
+ Sibling bool // The matching sibling node of current node.
+ Predicate func(NodeNavigator) bool
+}
+
+// Select returns the next node on the following or following-sibling axis
+// of each input node that satisfies f.Predicate, or nil when exhausted.
+func (f *followingQuery) Select(t iterator) NodeNavigator {
+ for {
+ if f.iterator == nil {
+ node := f.Input.Select(t)
+ if node == nil {
+ return nil
+ }
+ node = node.Copy()
+ if f.Sibling {
+ // following-sibling axis: walk right through the siblings.
+ f.iterator = func() NodeNavigator {
+ for {
+ if !node.MoveToNext() {
+ return nil
+ }
+ if f.Predicate(node) {
+ return node
+ }
+ }
+ }
+ } else {
+ // following axis: for each later sibling (climbing to ancestors
+ // when a level runs out), run a self-inclusive descendant query
+ // rooted at that sibling.
+ var q query // descendant query
+ f.iterator = func() NodeNavigator {
+ for {
+ if q == nil {
+ for !node.MoveToNext() {
+ if !node.MoveToParent() {
+ return nil
+ }
+ }
+ q = &descendantQuery{
+ Self: true,
+ Input: &contextQuery{},
+ Predicate: f.Predicate,
+ }
+ t.Current().MoveTo(node)
+ }
+ if node := q.Select(t); node != nil {
+ return node
+ }
+ q = nil
+ }
+ }
+ }
+ }
+
+ if node := f.iterator(); node != nil {
+ return node
+ }
+ f.iterator = nil
+ }
+}
+
+func (f *followingQuery) Evaluate(t iterator) interface{} {
+ f.Input.Evaluate(t)
+ return f
+}
+
+func (f *followingQuery) Test(n NodeNavigator) bool {
+ return f.Predicate(n)
+}
+
+func (f *followingQuery) Clone() query {
+ return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate}
+}
+
+// precedingQuery is an XPath preceding node query.(preceding::*)
+type precedingQuery struct {
+ iterator func() NodeNavigator
+ Input query
+ Sibling bool // The matching sibling node of current node.
+ Predicate func(NodeNavigator) bool
+}
+
+// Select returns the next node on the preceding or preceding-sibling axis
+// of each input node that satisfies p.Predicate, or nil when exhausted.
+func (p *precedingQuery) Select(t iterator) NodeNavigator {
+	for {
+		if p.iterator == nil {
+			node := p.Input.Select(t)
+			if node == nil {
+				return nil
+			}
+			node = node.Copy()
+			if p.Sibling {
+				// preceding-sibling axis: walk left through the siblings.
+				// (was `for !node.MoveToPrevious() { return nil }` — a `for`
+				// that ran at most once; `if` states the intent and matches
+				// the followingQuery sibling branch.)
+				p.iterator = func() NodeNavigator {
+					for {
+						if !node.MoveToPrevious() {
+							return nil
+						}
+						if p.Predicate(node) {
+							return node
+						}
+					}
+				}
+			} else {
+				// preceding axis: for each earlier sibling (climbing to
+				// ancestors when a level runs out), run a self-inclusive
+				// descendant query rooted at that sibling.
+				var q query
+				p.iterator = func() NodeNavigator {
+					for {
+						if q == nil {
+							for !node.MoveToPrevious() {
+								if !node.MoveToParent() {
+									return nil
+								}
+							}
+							q = &descendantQuery{
+								Self:      true,
+								Input:     &contextQuery{},
+								Predicate: p.Predicate,
+							}
+							t.Current().MoveTo(node)
+						}
+						if node := q.Select(t); node != nil {
+							return node
+						}
+						q = nil
+					}
+				}
+			}
+		}
+		if node := p.iterator(); node != nil {
+			return node
+		}
+		p.iterator = nil
+	}
+}
+
+func (p *precedingQuery) Evaluate(t iterator) interface{} {
+ p.Input.Evaluate(t)
+ return p
+}
+
+func (p *precedingQuery) Test(n NodeNavigator) bool {
+ return p.Predicate(n)
+}
+
+func (p *precedingQuery) Clone() query {
+ return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate}
+}
+
+// parentQuery is an XPath parent node query.(parent::*)
+type parentQuery struct {
+ Input query
+ Predicate func(NodeNavigator) bool
+}
+
+// Select returns, for each input node, its parent when that parent
+// satisfies the predicate; it returns nil once the input is exhausted.
+func (p *parentQuery) Select(t iterator) NodeNavigator {
+	for {
+		cur := p.Input.Select(t)
+		if cur == nil {
+			return nil
+		}
+		parent := cur.Copy()
+		if !parent.MoveToParent() {
+			continue
+		}
+		if p.Predicate(parent) {
+			return parent
+		}
+	}
+}
+
+func (p *parentQuery) Evaluate(t iterator) interface{} {
+ p.Input.Evaluate(t)
+ return p
+}
+
+func (p *parentQuery) Clone() query {
+ return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate}
+}
+
+func (p *parentQuery) Test(n NodeNavigator) bool {
+ return p.Predicate(n)
+}
+
+// selfQuery is an Self node query.(self::*)
+type selfQuery struct {
+ Input query
+ Predicate func(NodeNavigator) bool
+}
+
+// Select returns each input node that satisfies the predicate (self::*),
+// or nil once the input query is exhausted.
+func (s *selfQuery) Select(t iterator) NodeNavigator {
+	for {
+		node := s.Input.Select(t)
+		switch {
+		case node == nil:
+			return nil
+		case s.Predicate(node):
+			return node
+		}
+	}
+}
+
+func (s *selfQuery) Evaluate(t iterator) interface{} {
+ s.Input.Evaluate(t)
+ return s
+}
+
+func (s *selfQuery) Test(n NodeNavigator) bool {
+ return s.Predicate(n)
+}
+
+func (s *selfQuery) Clone() query {
+ return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate}
+}
+
+// filterQuery is an XPath query for predicate filter.
+type filterQuery struct {
+ Input query
+ Predicate query
+}
+
+// do reports whether the current node passes the predicate. A boolean
+// result is used directly; a string passes when non-empty; a number is
+// treated as a positional test against the input query's position
+// (e.g. //a[2]); anything else falls back to a non-empty node-set check.
+func (f *filterQuery) do(t iterator) bool {
+ val := reflect.ValueOf(f.Predicate.Evaluate(t))
+ switch val.Kind() {
+ case reflect.Bool:
+ return val.Bool()
+ case reflect.String:
+ return len(val.String()) > 0
+ case reflect.Float64:
+ pt := float64(getNodePosition(f.Input))
+ return int(val.Float()) == int(pt)
+ default:
+ if q, ok := f.Predicate.(query); ok {
+ return q.Select(t) != nil
+ }
+ }
+ return false
+}
+
+// Select returns the next input node that satisfies the filter predicate.
+// The iterator's current node is moved to the candidate before evaluating
+// the predicate so positional/context-sensitive predicates see it.
+func (f *filterQuery) Select(t iterator) NodeNavigator {
+ for {
+ node := f.Input.Select(t)
+ if node == nil {
+ return node
+ }
+ node = node.Copy()
+
+ t.Current().MoveTo(node)
+ if f.do(t) {
+ return node
+ }
+ }
+}
+
+func (f *filterQuery) Evaluate(t iterator) interface{} {
+ f.Input.Evaluate(t)
+ return f
+}
+
+func (f *filterQuery) Clone() query {
+ return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()}
+}
+
+// functionQuery is an XPath function that call a function to returns
+// value of current NodeNavigator node.
+type functionQuery struct {
+ Input query // Node Set
+ Func func(query, iterator) interface{} // The xpath function.
+}
+
+func (f *functionQuery) Select(t iterator) NodeNavigator {
+ return nil
+}
+
+// Evaluate call a specified function that will returns the
+// following value type: number,string,boolean.
+func (f *functionQuery) Evaluate(t iterator) interface{} {
+ return f.Func(f.Input, t)
+}
+
+func (f *functionQuery) Clone() query {
+ return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
+}
+
+// constantQuery is an XPath constant operand.
+type constantQuery struct {
+ Val interface{}
+}
+
+func (c *constantQuery) Select(t iterator) NodeNavigator {
+ return nil
+}
+
+func (c *constantQuery) Evaluate(t iterator) interface{} {
+ return c.Val
+}
+
+func (c *constantQuery) Clone() query {
+ return c
+}
+
+// logicalQuery is an XPath logical expression.
+type logicalQuery struct {
+ Left, Right query
+
+ Do func(iterator, interface{}, interface{}) interface{}
+}
+
+func (l *logicalQuery) Select(t iterator) NodeNavigator {
+ // When a XPath expr is logical expression.
+ node := t.Current().Copy()
+ val := l.Evaluate(t)
+ switch val.(type) {
+ case bool:
+ if val.(bool) == true {
+ return node
+ }
+ }
+ return nil
+}
+
+func (l *logicalQuery) Evaluate(t iterator) interface{} {
+ m := l.Left.Evaluate(t)
+ n := l.Right.Evaluate(t)
+ return l.Do(t, m, n)
+}
+
+func (l *logicalQuery) Clone() query {
+ return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do}
+}
+
+// numericQuery is an XPath numeric operator expression.
+type numericQuery struct {
+ Left, Right query
+
+ Do func(interface{}, interface{}) interface{}
+}
+
+func (n *numericQuery) Select(t iterator) NodeNavigator {
+ return nil
+}
+
+func (n *numericQuery) Evaluate(t iterator) interface{} {
+ m := n.Left.Evaluate(t)
+ k := n.Right.Evaluate(t)
+ return n.Do(m, k)
+}
+
+func (n *numericQuery) Clone() query {
+ return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do}
+}
+
+type booleanQuery struct {
+ IsOr bool
+ Left, Right query
+ iterator func() NodeNavigator
+}
+
+// Select evaluates the boolean query as a node-set operation: for OR the
+// result is the concatenation of both sides' matches, for AND it is their
+// intersection. The result list is materialized on first call and then
+// served one node per call via the cached iterator.
+func (b *booleanQuery) Select(t iterator) NodeNavigator {
+	if b.iterator == nil {
+		var list []NodeNavigator
+		i := 0
+		root := t.Current().Copy()
+		if b.IsOr {
+			for {
+				node := b.Left.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				list = append(list, node)
+			}
+			t.Current().MoveTo(root)
+			for {
+				node := b.Right.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				list = append(list, node)
+			}
+		} else {
+			var m []NodeNavigator
+			var n []NodeNavigator
+			for {
+				node := b.Left.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				// BUG FIX: was `list = append(m, node)`, which overwrote
+				// list with a one-element slice and left m empty, so the
+				// intersection below was always empty.
+				m = append(m, node)
+			}
+			t.Current().MoveTo(root)
+			for {
+				node := b.Right.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				// BUG FIX: was `list = append(n, node)` (same defect).
+				n = append(n, node)
+			}
+			// Intersect: keep left-side nodes that also appear on the right.
+			for _, k := range m {
+				for _, j := range n {
+					if k == j {
+						list = append(list, k)
+					}
+				}
+			}
+		}
+
+		b.iterator = func() NodeNavigator {
+			if i >= len(list) {
+				return nil
+			}
+			node := list[i]
+			i++
+			return node
+		}
+	}
+	return b.iterator()
+}
+
+// Evaluate computes the boolean value of the expression with standard
+// short-circuiting: OR returns true as soon as the left side is true,
+// AND returns false as soon as the left side is false.
+func (b *booleanQuery) Evaluate(t iterator) interface{} {
+	// Reset any cached node-set state so a re-evaluated expression does not
+	// reuse a stale iterator — consistent with descendantQuery.Evaluate and
+	// unionQuery.Evaluate, which reset theirs.
+	b.iterator = nil
+	m := b.Left.Evaluate(t)
+	left := asBool(t, m)
+	if b.IsOr && left {
+		return true
+	}
+	if !b.IsOr && !left {
+		return false
+	}
+	m = b.Right.Evaluate(t)
+	return asBool(t, m)
+}
+
+func (b *booleanQuery) Clone() query {
+ return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
+}
+
+type unionQuery struct {
+ Left, Right query
+ iterator func() NodeNavigator
+}
+
+// Select returns nodes matched by either side of the union, deduplicated
+// by a structural hash of each node's identity (see getHashCode).
+// NOTE(review): results are collected from a Go map, so iteration order is
+// unspecified — not document order; confirm callers do not rely on order.
+func (u *unionQuery) Select(t iterator) NodeNavigator {
+ if u.iterator == nil {
+ var m = make(map[uint64]NodeNavigator)
+ root := t.Current().Copy()
+ for {
+ node := u.Left.Select(t)
+ if node == nil {
+ break
+ }
+ code := getHashCode(node.Copy())
+ if _, ok := m[code]; !ok {
+ m[code] = node.Copy()
+ }
+ }
+ t.Current().MoveTo(root)
+ for {
+ node := u.Right.Select(t)
+ if node == nil {
+ break
+ }
+ code := getHashCode(node.Copy())
+ if _, ok := m[code]; !ok {
+ m[code] = node.Copy()
+ }
+ }
+ list := make([]NodeNavigator, len(m))
+ var i int
+ for _, v := range m {
+ list[i] = v
+ i++
+ }
+ i = 0
+ u.iterator = func() NodeNavigator {
+ if i >= len(list) {
+ return nil
+ }
+ node := list[i]
+ i++
+ return node
+ }
+ }
+ return u.iterator()
+}
+
+func (u *unionQuery) Evaluate(t iterator) interface{} {
+ u.iterator = nil
+ u.Left.Evaluate(t)
+ u.Right.Evaluate(t)
+ return u
+}
+
+func (u *unionQuery) Clone() query {
+ return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()}
+}
+
+// getHashCode computes an FNV-1a hash identifying a node by its kind,
+// name/value, and its 1-based sibling position at each ancestor level.
+// The navigator n is moved while hashing, so callers pass a Copy().
+func getHashCode(n NodeNavigator) uint64 {
+ var sb bytes.Buffer
+ switch n.NodeType() {
+ case AttributeNode, TextNode, CommentNode:
+ sb.WriteString(fmt.Sprintf("%s=%s", n.LocalName(), n.Value()))
+ // Disambiguate identical name=value pairs by the parent's name.
+ if n.MoveToParent() {
+ sb.WriteString(n.LocalName())
+ }
+ case ElementNode:
+ sb.WriteString(n.Prefix() + n.LocalName())
+ d := 1
+ for n.MoveToPrevious() {
+ d++
+ }
+ sb.WriteString(fmt.Sprintf("-%d", d))
+
+ // Append each ancestor's sibling index to encode the full path.
+ for n.MoveToParent() {
+ d = 1
+ for n.MoveToPrevious() {
+ d++
+ }
+ sb.WriteString(fmt.Sprintf("-%d", d))
+ }
+ }
+ h := fnv.New64a()
+ h.Write([]byte(sb.String()))
+ return h.Sum64()
+}
+
+func getNodePosition(q query) int {
+ type Position interface {
+ position() int
+ }
+ if count, ok := q.(Position); ok {
+ return count.position()
+ }
+ return 1
+}
diff --git a/vendor/github.com/antchfx/xpath/xpath.go b/vendor/github.com/antchfx/xpath/xpath.go
new file mode 100644
index 0000000..7e3f52c
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/xpath.go
@@ -0,0 +1,157 @@
+package xpath
+
+import (
+ "errors"
+)
+
+// NodeType represents a type of XPath node.
+type NodeType int
+
+const (
+ // RootNode is a root node of the XML document or node tree.
+ RootNode NodeType = iota
+
+ // ElementNode is an element, such as .
+ ElementNode
+
+ // AttributeNode is an attribute, such as id='123'.
+ AttributeNode
+
+ // TextNode is the text content of a node.
+ TextNode
+
+ // CommentNode is a comment node, such as
+ CommentNode
+
+ // allNode is any types of node, used by xpath package only to predicate match.
+ allNode
+)
+
+// NodeNavigator provides cursor model for navigating XML data.
+type NodeNavigator interface {
+ // NodeType returns the XPathNodeType of the current node.
+ NodeType() NodeType
+
+ // LocalName gets the Name of the current node.
+ LocalName() string
+
+ // Prefix returns namespace prefix associated with the current node.
+ Prefix() string
+
+ // Value gets the value of current node.
+ Value() string
+
+ // Copy does a deep copy of the NodeNavigator and all its components.
+ Copy() NodeNavigator
+
+ // MoveToRoot moves the NodeNavigator to the root node of the current node.
+ MoveToRoot()
+
+ // MoveToParent moves the NodeNavigator to the parent node of the current node.
+ MoveToParent() bool
+
+ // MoveToNextAttribute moves the NodeNavigator to the next attribute on current node.
+ MoveToNextAttribute() bool
+
+ // MoveToChild moves the NodeNavigator to the first child node of the current node.
+ MoveToChild() bool
+
+ // MoveToFirst moves the NodeNavigator to the first sibling node of the current node.
+ MoveToFirst() bool
+
+ // MoveToNext moves the NodeNavigator to the next sibling node of the current node.
+ MoveToNext() bool
+
+ // MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node.
+ MoveToPrevious() bool
+
+ // MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator.
+ MoveTo(NodeNavigator) bool
+}
+
+// NodeIterator holds all matched Node object.
+type NodeIterator struct {
+ node NodeNavigator
+ query query
+}
+
+// Current returns current node which matched.
+func (t *NodeIterator) Current() NodeNavigator {
+ return t.node
+}
+
+// MoveNext moves Navigator to the next match node.
+func (t *NodeIterator) MoveNext() bool {
+ n := t.query.Select(t)
+ if n != nil {
+ if !t.node.MoveTo(n) {
+ t.node = n.Copy()
+ }
+ return true
+ }
+ return false
+}
+
+// Select selects a node set using the specified XPath expression.
+// This method is deprecated, recommend using Expr.Select() method instead.
+func Select(root NodeNavigator, expr string) *NodeIterator {
+ exp, err := Compile(expr)
+ if err != nil {
+ panic(err)
+ }
+ return exp.Select(root)
+}
+
+// Expr is an XPath expression for query.
+type Expr struct {
+ s string
+ q query
+}
+
+type iteratorFunc func() NodeNavigator
+
+func (f iteratorFunc) Current() NodeNavigator {
+ return f()
+}
+
+// Evaluate returns the result of the expression.
+// The result type is one of the following: bool, float64, string or
+// *NodeIterator.
+func (expr *Expr) Evaluate(root NodeNavigator) interface{} {
+ val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root }))
+ switch val.(type) {
+ case query:
+ // A query result means the expression selects nodes; wrap a fresh
+ // iterator over a clone so the compiled Expr stays reusable.
+ return &NodeIterator{query: expr.q.Clone(), node: root}
+ }
+ return val
+}
+
+// Select selects a node set using the specified XPath expression.
+func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
+ return &NodeIterator{query: expr.q.Clone(), node: root}
+}
+
+// String returns XPath expression string.
+func (expr *Expr) String() string {
+ return expr.s
+}
+
+// Compile compiles an XPath expression string.
+func Compile(expr string) (*Expr, error) {
+ if expr == "" {
+ return nil, errors.New("expr expression is nil")
+ }
+ qy, err := build(expr)
+ if err != nil {
+ return nil, err
+ }
+ return &Expr{s: expr, q: qy}, nil
+}
+
+// MustCompile compiles an XPath expression string, ignoring any error.
+// Unlike the usual Must convention it does not panic on failure: it
+// returns nil when the expression is invalid, so callers must check the
+// result before use.
+func MustCompile(expr string) *Expr {
+ exp, err := Compile(expr)
+ if err != nil {
+ return nil
+ }
+ return exp
+}
diff --git a/vendor/github.com/antchfx/xpath/xpath_test.go b/vendor/github.com/antchfx/xpath/xpath_test.go
new file mode 100644
index 0000000..63e5b8b
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/xpath_test.go
@@ -0,0 +1,709 @@
+package xpath
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+var (
+ html = example()
+ html2 = example2()
+)
+
+func TestCompile(t *testing.T) {
+ var err error
+ _, err = Compile("//a")
+ if err != nil {
+ t.Fatalf("//a should be correct but got error %s", err)
+ }
+ _, err = Compile("//a[id=']/span")
+ if err == nil {
+ t.Fatal("//a[id=] should be got correct but is nil")
+ }
+ _, err = Compile("//ul/li/@class")
+ if err != nil {
+ t.Fatalf("//ul/li/@class should be correct but got error %s", err)
+ }
+ _, err = Compile("/a/b/(c, .[not(c)])")
+ if err != nil {
+ t.Fatalf("/a/b/(c, .[not(c)]) should be correct but got error %s", err)
+ }
+}
+func TestSelf(t *testing.T) {
+ testXPath(t, html, ".", "html")
+ testXPath(t, html.FirstChild, ".", "head")
+ testXPath(t, html, "self::*", "html")
+ testXPath(t, html.LastChild, "self::body", "body")
+ testXPath2(t, html, "//body/./ul/li/a", 3)
+}
+
+func TestParent(t *testing.T) {
+ testXPath(t, html.LastChild, "..", "html")
+ testXPath(t, html.LastChild, "parent::*", "html")
+ a := selectNode(html, "//li/a")
+ testXPath(t, a, "parent::*", "li")
+ testXPath(t, html, "//title/parent::head", "head")
+}
+
+func TestAttribute(t *testing.T) {
+ testXPath(t, html, "@lang='en'", "html")
+ testXPath2(t, html, "@lang='zh'", 0)
+ testXPath2(t, html, "//@href", 3)
+ testXPath2(t, html, "//a[@*]", 3)
+}
+
+func TestSequence(t *testing.T) {
+ testXPath2(t, html2, "//table/tbody/tr/td/(para, .[not(para)])", 9)
+ testXPath2(t, html2, "//table/tbody/tr/td/(para, .[not(para)], ..)", 12)
+}
+
+func TestRelativePath(t *testing.T) {
+ testXPath(t, html, "head", "head")
+ testXPath(t, html, "/head", "head")
+ testXPath(t, html, "body//li", "li")
+ testXPath(t, html, "/head/title", "title")
+
+ testXPath2(t, html, "/body/ul/li/a", 3)
+ testXPath(t, html, "//title", "title")
+ testXPath(t, html, "//title/..", "head")
+ testXPath(t, html, "//title/../..", "html")
+ testXPath2(t, html, "//a[@href]", 3)
+ testXPath(t, html, "//ul/../footer", "footer")
+}
+
+func TestChild(t *testing.T) {
+ testXPath(t, html, "/child::head", "head")
+ testXPath(t, html, "/child::head/child::title", "title")
+ testXPath(t, html, "//title/../child::title", "title")
+ testXPath(t, html.Parent, "//child::*", "html")
+}
+
+func TestDescendant(t *testing.T) {
+ testXPath2(t, html, "descendant::*", 15)
+ testXPath2(t, html, "/head/descendant::*", 2)
+ testXPath2(t, html, "//ul/descendant::*", 7) //
+
+
+
+ */
+ doc := createNode("", RootNode)
+ xhtml := doc.createChildNode("html", ElementNode)
+ xhtml.addAttribute("lang", "en")
+
+ // The HTML head section.
+ head := xhtml.createChildNode("head", ElementNode)
+ n := head.createChildNode("title", ElementNode)
+ n = n.createChildNode("Hello", TextNode)
+ n = head.createChildNode("meta", ElementNode)
+ n.addAttribute("name", "language")
+ n.addAttribute("content", "en")
+ // The HTML body section.
+ body := xhtml.createChildNode("body", ElementNode)
+ n = body.createChildNode("h1", ElementNode)
+ n = n.createChildNode("\nThis is a H1\n", TextNode)
+ ul := body.createChildNode("ul", ElementNode)
+ n = ul.createChildNode("li", ElementNode)
+ n = n.createChildNode("a", ElementNode)
+ n.addAttribute("id", "1")
+ n.addAttribute("href", "/")
+ n = n.createChildNode("Home", TextNode)
+ n = ul.createChildNode("li", ElementNode)
+ n = n.createChildNode("a", ElementNode)
+ n.addAttribute("id", "2")
+ n.addAttribute("href", "/about")
+ n = n.createChildNode("about", TextNode)
+ n = ul.createChildNode("li", ElementNode)
+ n = n.createChildNode("a", ElementNode)
+ n.addAttribute("id", "3")
+ n.addAttribute("href", "/account")
+ n = n.createChildNode("login", TextNode)
+ n = ul.createChildNode("li", ElementNode)
+
+ n = body.createChildNode("p", ElementNode)
+ n = n.createChildNode("Hello,This is an example for gxpath.", TextNode)
+
+ n = body.createChildNode("footer", ElementNode)
+ n = n.createChildNode("footer script", TextNode)
+
+ return xhtml
+}
diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore
new file mode 100644
index 0000000..a3062be
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.gitignore
@@ -0,0 +1 @@
+.vscode/*
diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml
new file mode 100644
index 0000000..9c35955
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - 1.x
+script:
+ - GOOS=windows go install github.com/chzyer/readline/example/...
+ - GOOS=linux go install github.com/chzyer/readline/example/...
+ - GOOS=darwin go install github.com/chzyer/readline/example/...
+ - go test -race -v
diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md
new file mode 100644
index 0000000..14ff5be
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/CHANGELOG.md
@@ -0,0 +1,58 @@
+# ChangeLog
+
+### 1.4 - 2016-07-25
+
+* [#60][60] Support dynamic autocompletion
+* Fix ANSI parser on Windows
+* Fix wrong column width in complete mode on Windows
+* Remove dependent package "golang.org/x/crypto/ssh/terminal"
+
+### 1.3 - 2016-05-09
+
+* [#38][38] add SetChildren for prefix completer interface
+* [#42][42] improve multiple lines compatibility
+* [#43][43] remove sub-package(runes) for gopkg compatibility
+* [#46][46] Auto complete with space prefixed line
+* [#48][48] support suspend process (ctrl+Z)
+* [#49][49] fix bug that check equals with previous command
+* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty
+
+### 1.2 - 2016-03-05
+
+* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), written by [@sahib](https://github.com/sahib)
+* [#23][23], support stdin remapping
+* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after the user has submitted it; usually used in IM.
+* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines.
+* Works even when stdin/stdout is not a tty.
+* Add new simple APIs for a single instance; see [here](https://github.com/chzyer/readline/blob/master/std.go). History must be saved manually when using these APIs.
+* [#28][28], fixes the history is not working as expected.
+* [#33][33], vim mode now support `c`, `d`, `x (delete character)`, `r (replace character)`
+
+### 1.1 - 2015-11-20
+
+* [#12][12] Add support for key ``/``/``
+* Only enter raw mode as needed (calling `Readline()`), program will receive signal(e.g. Ctrl+C) if not interact with `readline`.
+* Bugs fixed for `PrefixCompleter`
+* Pressing `Ctrl+D` on an empty line causes `io.EOF`; pressing `Ctrl+C` at any time causes `ErrInterrupt` instead of `io.EOF`. This provides a shell-like user experience.
+* Customable Interrupt/EOF prompt in `Config`
+* [#17][17] Change atomic package to use 32bit function to let it runnable on arm 32bit devices
+* Provides a new password user experience(`readline.ReadPasswordEx()`).
+
+### 1.0 - 2015-10-14
+
+* Initial public release.
+
+[12]: https://github.com/chzyer/readline/pull/12
+[17]: https://github.com/chzyer/readline/pull/17
+[23]: https://github.com/chzyer/readline/pull/23
+[27]: https://github.com/chzyer/readline/pull/27
+[28]: https://github.com/chzyer/readline/pull/28
+[33]: https://github.com/chzyer/readline/pull/33
+[38]: https://github.com/chzyer/readline/pull/38
+[42]: https://github.com/chzyer/readline/pull/42
+[43]: https://github.com/chzyer/readline/pull/43
+[46]: https://github.com/chzyer/readline/pull/46
+[48]: https://github.com/chzyer/readline/pull/48
+[49]: https://github.com/chzyer/readline/pull/49
+[53]: https://github.com/chzyer/readline/pull/53
+[60]: https://github.com/chzyer/readline/pull/60
diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE
new file mode 100644
index 0000000..c9afab3
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Chzyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
new file mode 100644
index 0000000..fab974b
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/README.md
@@ -0,0 +1,114 @@
+[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline)
+[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md)
+[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases)
+[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline)
+[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers)
+[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors)
+
+
tag")
+ }
+
+ if paragraphCallbackCount != 2 {
+ t.Error("Failed to find all
tags")
+ }
+}
+
+func BenchmarkOnHTML(b *testing.B) {
+ ts := newTestServer()
+ defer ts.Close()
+
+ c := NewCollector()
+ c.OnHTML("p", func(_ *HTMLElement) {})
+
+ for n := 0; n < b.N; n++ {
+ c.Visit(fmt.Sprintf("%s/html?q=%d", ts.URL, n))
+ }
+}
+
+func BenchmarkOnXML(b *testing.B) {
+ ts := newTestServer()
+ defer ts.Close()
+
+ c := NewCollector()
+ c.OnXML("//p", func(_ *XMLElement) {})
+
+ for n := 0; n < b.N; n++ {
+ c.Visit(fmt.Sprintf("%s/html?q=%d", ts.URL, n))
+ }
+}
+
+func BenchmarkOnResponse(b *testing.B) {
+ ts := newTestServer()
+ defer ts.Close()
+
+ c := NewCollector()
+ c.AllowURLRevisit = true
+ c.OnResponse(func(_ *Response) {})
+
+ for n := 0; n < b.N; n++ {
+ c.Visit(ts.URL)
+ }
+}
diff --git a/vendor/github.com/gocolly/colly/context.go b/vendor/github.com/gocolly/colly/context.go
new file mode 100644
index 0000000..4bc11b9
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/context.go
@@ -0,0 +1,87 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "sync"
+)
+
+// Context provides a tiny layer for passing data between callbacks
+type Context struct {
+ contextMap map[string]interface{}
+ lock *sync.RWMutex
+}
+
+// NewContext initializes a new Context instance
+func NewContext() *Context {
+ return &Context{
+ contextMap: make(map[string]interface{}),
+ lock: &sync.RWMutex{},
+ }
+}
+
+// UnmarshalBinary decodes Context value to nil
+// This function is used by request caching
+func (c *Context) UnmarshalBinary(_ []byte) error {
+ return nil
+}
+
+// MarshalBinary encodes Context value
+// This function is used by request caching
+func (c *Context) MarshalBinary() (_ []byte, _ error) {
+ return nil, nil
+}
+
+// Put stores a value of any type in Context
+func (c *Context) Put(key string, value interface{}) {
+ c.lock.Lock()
+ c.contextMap[key] = value
+ c.lock.Unlock()
+}
+
+// Get retrieves a string value from Context.
+// Get returns an empty string if key not found.
+// NOTE(review): the stored value is type-asserted to string, so Get panics
+// if a non-string was stored under key — use GetAny for arbitrary values.
+func (c *Context) Get(key string) string {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if v, ok := c.contextMap[key]; ok {
+ return v.(string)
+ }
+ return ""
+}
+
+// GetAny retrieves a value from Context.
+// GetAny returns nil if key not found
+func (c *Context) GetAny(key string) interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if v, ok := c.contextMap[key]; ok {
+ return v
+ }
+ return nil
+}
+
+// ForEach applies fn to every key/value pair stored in the Context and
+// returns the collected results. Iteration order is unspecified (map order).
+func (c *Context) ForEach(fn func(k string, v interface{}) interface{}) []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ ret := make([]interface{}, 0, len(c.contextMap))
+ for k, v := range c.contextMap {
+ ret = append(ret, fn(k, v))
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/gocolly/colly/context_test.go b/vendor/github.com/gocolly/colly/context_test.go
new file mode 100644
index 0000000..07d7d85
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/context_test.go
@@ -0,0 +1,39 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "strconv"
+ "testing"
+)
+
+func TestContextIteration(t *testing.T) {
+ ctx := NewContext()
+ for i := 0; i < 10; i++ {
+ ctx.Put(strconv.Itoa(i), i)
+ }
+ values := ctx.ForEach(func(k string, v interface{}) interface{} {
+ return v.(int)
+ })
+ if len(values) != 10 {
+ t.Fatal("fail to iterate context")
+ }
+ for _, i := range values {
+ v := i.(int)
+ if v != ctx.GetAny(strconv.Itoa(v)).(int) {
+ t.Fatal("value not equal")
+ }
+ }
+}
diff --git a/vendor/github.com/gocolly/colly/debug/debug.go b/vendor/github.com/gocolly/colly/debug/debug.go
new file mode 100644
index 0000000..705d0f7
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/debug/debug.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+// Event represents an action inside a collector
+type Event struct {
+ // Type is the type of the event
+ Type string
+ // RequestID identifies the HTTP request of the Event
+ RequestID uint32
+ // CollectorID identifies the collector of the Event
+ CollectorID uint32
+ // Values contains the event's key-value pairs. Different type of events
+ // can return different key-value pairs
+ Values map[string]string
+}
+
+// Debugger is an interface for different type of debugging backends
+type Debugger interface {
+ // Init initializes the backend
+ Init() error
+ // Event receives a new collector event.
+ Event(e *Event)
+}
diff --git a/vendor/github.com/gocolly/colly/debug/logdebugger.go b/vendor/github.com/gocolly/colly/debug/logdebugger.go
new file mode 100644
index 0000000..f866b6d
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/debug/logdebugger.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+import (
+ "io"
+ "log"
+ "os"
+ "sync/atomic"
+ "time"
+)
+
+// LogDebugger is the simplest debugger which prints log messages to the STDERR
+type LogDebugger struct {
+ // Output is the log destination, anything can be used which implements them
+ // io.Writer interface. Leave it blank to use STDERR
+ Output io.Writer
+ // Prefix appears at the beginning of each generated log line
+ Prefix string
+ // Flag defines the logging properties.
+ Flag int
+ logger *log.Logger
+ counter int32
+ start time.Time
+}
+
+// Init initializes the LogDebugger
+func (l *LogDebugger) Init() error {
+ l.counter = 0
+ l.start = time.Now()
+ if l.Output == nil {
+ l.Output = os.Stderr
+ }
+ l.logger = log.New(l.Output, l.Prefix, l.Flag)
+ return nil
+}
+
+// Event receives Collector events and prints them to STDERR
+func (l *LogDebugger) Event(e *Event) {
+ i := atomic.AddInt32(&l.counter, 1)
+ l.logger.Printf("[%06d] %d [%6d - %s] %q (%s)\n", i, e.CollectorID, e.RequestID, e.Type, e.Values, time.Since(l.start))
+}
diff --git a/vendor/github.com/gocolly/colly/debug/webdebugger.go b/vendor/github.com/gocolly/colly/debug/webdebugger.go
new file mode 100644
index 0000000..e246361
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/debug/webdebugger.go
@@ -0,0 +1,146 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+import (
+ "encoding/json"
+ "log"
+ "net/http"
+ "time"
+)
+
+// WebDebugger is a web based debugging frontend for colly
+type WebDebugger struct {
+ // Address is the address of the web server. It is 127.0.0.1:7676 by default.
+ Address string
+ initialized bool
+ CurrentRequests map[uint32]requestInfo
+ RequestLog []requestInfo
+}
+
+type requestInfo struct {
+ URL string
+ Started time.Time
+ Duration time.Duration
+ ResponseStatus string
+ ID uint32
+ CollectorID uint32
+}
+
+// Init initializes the WebDebugger
+func (w *WebDebugger) Init() error {
+ if w.initialized {
+ return nil
+ }
+ defer func() {
+ w.initialized = true
+ }()
+ if w.Address == "" {
+ w.Address = "127.0.0.1:7676"
+ }
+ w.RequestLog = make([]requestInfo, 0)
+ w.CurrentRequests = make(map[uint32]requestInfo)
+ http.HandleFunc("/", w.indexHandler)
+ http.HandleFunc("/status", w.statusHandler)
+ log.Println("Starting debug webserver on", w.Address)
+ go http.ListenAndServe(w.Address, nil)
+ return nil
+}
+
+// Event updates the debugger's status
+func (w *WebDebugger) Event(e *Event) {
+ switch e.Type {
+ case "request":
+ w.CurrentRequests[e.RequestID] = requestInfo{
+ URL: e.Values["url"],
+ Started: time.Now(),
+ ID: e.RequestID,
+ CollectorID: e.CollectorID,
+ }
+ case "response", "error":
+ r := w.CurrentRequests[e.RequestID]
+ r.Duration = time.Since(r.Started)
+ r.ResponseStatus = e.Values["status"]
+ w.RequestLog = append(w.RequestLog, r)
+ delete(w.CurrentRequests, e.RequestID)
+ }
+}
+
+func (w *WebDebugger) indexHandler(wr http.ResponseWriter, r *http.Request) {
+ wr.Write([]byte(`
+
+
+
+
+
+`))
+}
+
+func (w *WebDebugger) statusHandler(wr http.ResponseWriter, r *http.Request) {
+ jsonData, err := json.MarshalIndent(w, "", " ")
+ if err != nil {
+ panic(err)
+ }
+ wr.Write(jsonData)
+}
diff --git a/vendor/github.com/gocolly/colly/htmlelement.go b/vendor/github.com/gocolly/colly/htmlelement.go
new file mode 100644
index 0000000..92484bd
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/htmlelement.go
@@ -0,0 +1,120 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "strings"
+
+ "github.com/PuerkitoBio/goquery"
+ "golang.org/x/net/html"
+)
+
+// HTMLElement is the representation of a HTML tag.
+type HTMLElement struct {
+ // Name is the name of the tag
+ Name string
+ Text string
+ attributes []html.Attribute
+ // Request is the request object of the element's HTML document
+ Request *Request
+ // Response is the Response object of the element's HTML document
+ Response *Response
+ // DOM is the goquery parsed DOM object of the page. DOM is relative
+ // to the current HTMLElement
+ DOM *goquery.Selection
+ // Index stores the position of the current element within all the elements matched by an OnHTML callback
+ Index int
+}
+
+// NewHTMLElementFromSelectionNode creates a HTMLElement from a goquery.Selection Node.
+func NewHTMLElementFromSelectionNode(resp *Response, s *goquery.Selection, n *html.Node, idx int) *HTMLElement {
+ return &HTMLElement{
+ Name: n.Data,
+ Request: resp.Request,
+ Response: resp,
+ Text: goquery.NewDocumentFromNode(n).Text(),
+ DOM: s,
+ Index: idx,
+ attributes: n.Attr,
+ }
+}
+
+// Attr returns the selected attribute of a HTMLElement or empty string
+// if no attribute found
+func (h *HTMLElement) Attr(k string) string {
+ for _, a := range h.attributes {
+ if a.Key == k {
+ return a.Val
+ }
+ }
+ return ""
+}
+
+// ChildText returns the concatenated and stripped text content of the matching
+// elements.
+func (h *HTMLElement) ChildText(goquerySelector string) string {
+ return strings.TrimSpace(h.DOM.Find(goquerySelector).Text())
+}
+
+// ChildAttr returns the stripped text content of the first matching
+// element's attribute.
+func (h *HTMLElement) ChildAttr(goquerySelector, attrName string) string {
+ if attr, ok := h.DOM.Find(goquerySelector).Attr(attrName); ok {
+ return strings.TrimSpace(attr)
+ }
+ return ""
+}
+
+// ChildAttrs returns the stripped text content of all the matching
+// element's attributes.
+func (h *HTMLElement) ChildAttrs(goquerySelector, attrName string) []string {
+ var res []string
+ h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
+ if attr, ok := s.Attr(attrName); ok {
+ res = append(res, strings.TrimSpace(attr))
+ }
+ })
+ return res
+}
+
+// ForEach iterates over the elements matched by the first argument
+// and calls the callback function on every HTMLElement match.
+func (h *HTMLElement) ForEach(goquerySelector string, callback func(int, *HTMLElement)) {
+ i := 0
+ h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
+ for _, n := range s.Nodes {
+ callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i))
+ i++
+ }
+ })
+}
+
+// ForEachWithBreak iterates over the elements matched by the first argument
+// and calls the callback function on every HTMLElement match.
+// It is identical to ForEach except that it is possible to break
+// out of the loop by returning false in the callback function. It returns the
+// current Selection object.
+func (h *HTMLElement) ForEachWithBreak(goquerySelector string, callback func(int, *HTMLElement) bool) {
+ i := 0
+ h.DOM.Find(goquerySelector).EachWithBreak(func(_ int, s *goquery.Selection) bool {
+ for _, n := range s.Nodes {
+ if callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i)) {
+ i++
+ return true
+ }
+ }
+ return false
+ })
+}
diff --git a/vendor/github.com/gocolly/colly/http_backend.go b/vendor/github.com/gocolly/colly/http_backend.go
new file mode 100644
index 0000000..77f1107
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/http_backend.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "crypto/sha1"
+ "encoding/gob"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "compress/gzip"
+
+ "github.com/gobwas/glob"
+)
+
+type httpBackend struct {
+ LimitRules []*LimitRule
+ Client *http.Client
+ lock *sync.RWMutex
+}
+
+// LimitRule provides connection restrictions for domains.
+// Both DomainRegexp and DomainGlob can be used to specify
+// the included domains patterns, but at least one is required.
+// There can be two kinds of limitations:
+// - Parallelism: Set limit for the number of concurrent requests to matching domains
+// - Delay: Wait specified amount of time between requests (parallelism is 1 in this case)
+type LimitRule struct {
+ // DomainRegexp is a regular expression to match against domains
+ DomainRegexp string
+ // DomainGlob is a glob pattern to match against domains
+ DomainGlob string
+ // Delay is the duration to wait before creating a new request to the matching domains
+ Delay time.Duration
+ // RandomDelay is the extra randomized duration to wait added to Delay before creating a new request
+ RandomDelay time.Duration
+ // Parallelism is the number of the maximum allowed concurrent requests of the matching domains
+ Parallelism int
+ waitChan chan bool
+ compiledRegexp *regexp.Regexp
+ compiledGlob glob.Glob
+}
+
+// Init initializes the private members of LimitRule
+func (r *LimitRule) Init() error {
+ waitChanSize := 1
+ if r.Parallelism > 1 {
+ waitChanSize = r.Parallelism
+ }
+ r.waitChan = make(chan bool, waitChanSize)
+ hasPattern := false
+ if r.DomainRegexp != "" {
+ c, err := regexp.Compile(r.DomainRegexp)
+ if err != nil {
+ return err
+ }
+ r.compiledRegexp = c
+ hasPattern = true
+ }
+ if r.DomainGlob != "" {
+ c, err := glob.Compile(r.DomainGlob)
+ if err != nil {
+ return err
+ }
+ r.compiledGlob = c
+ hasPattern = true
+ }
+ if !hasPattern {
+ return ErrNoPattern
+ }
+ return nil
+}
+
+func (h *httpBackend) Init(jar http.CookieJar) {
+ rand.Seed(time.Now().UnixNano())
+ h.Client = &http.Client{
+ Jar: jar,
+ Timeout: 10 * time.Second,
+ }
+ h.lock = &sync.RWMutex{}
+}
+
+// Match checks that the domain parameter triggers the rule
+func (r *LimitRule) Match(domain string) bool {
+ match := false
+ if r.compiledRegexp != nil && r.compiledRegexp.MatchString(domain) {
+ match = true
+ }
+ if r.compiledGlob != nil && r.compiledGlob.Match(domain) {
+ match = true
+ }
+ return match
+}
+
+func (h *httpBackend) GetMatchingRule(domain string) *LimitRule {
+ if h.LimitRules == nil {
+ return nil
+ }
+ h.lock.RLock()
+ defer h.lock.RUnlock()
+ for _, r := range h.LimitRules {
+ if r.Match(domain) {
+ return r
+ }
+ }
+ return nil
+}
+
+func (h *httpBackend) Cache(request *http.Request, bodySize int, cacheDir string) (*Response, error) {
+ if cacheDir == "" || request.Method != "GET" {
+ return h.Do(request, bodySize)
+ }
+ sum := sha1.Sum([]byte(request.URL.String()))
+ hash := hex.EncodeToString(sum[:])
+ dir := path.Join(cacheDir, hash[:2])
+ filename := path.Join(dir, hash)
+ if file, err := os.Open(filename); err == nil {
+ resp := new(Response)
+ err := gob.NewDecoder(file).Decode(resp)
+ file.Close()
+ if resp.StatusCode < 500 {
+ return resp, err
+ }
+ }
+ resp, err := h.Do(request, bodySize)
+ if err != nil || resp.StatusCode >= 500 {
+ return resp, err
+ }
+ if _, err := os.Stat(dir); err != nil {
+ if err := os.MkdirAll(dir, 0750); err != nil {
+ return resp, err
+ }
+ }
+ file, err := os.Create(filename + "~")
+ if err != nil {
+ return resp, err
+ }
+ if err := gob.NewEncoder(file).Encode(resp); err != nil {
+ file.Close()
+ return resp, err
+ }
+ file.Close()
+ return resp, os.Rename(filename+"~", filename)
+}
+
+func (h *httpBackend) Do(request *http.Request, bodySize int) (*Response, error) {
+ r := h.GetMatchingRule(request.URL.Host)
+ if r != nil {
+ r.waitChan <- true
+ defer func(r *LimitRule) {
+ randomDelay := time.Duration(0)
+ if r.RandomDelay != 0 {
+ randomDelay = time.Duration(rand.Int63n(int64(r.RandomDelay)))
+ }
+ time.Sleep(r.Delay + randomDelay)
+ <-r.waitChan
+ }(r)
+ }
+
+ res, err := h.Client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.Request != nil {
+ *request = *res.Request
+ }
+
+ var bodyReader io.Reader = res.Body
+ if bodySize > 0 {
+ bodyReader = io.LimitReader(bodyReader, int64(bodySize))
+ }
+ contentEncoding := strings.ToLower(res.Header.Get("Content-Encoding"))
+ if !res.Uncompressed && (strings.Contains(contentEncoding, "gzip") || (contentEncoding == "" && strings.Contains(strings.ToLower((res.Header.Get("Content-Type"))), "gzip"))) {
+ bodyReader, err = gzip.NewReader(bodyReader)
+ if err != nil {
+ return nil, err
+ }
+ }
+ body, err := ioutil.ReadAll(bodyReader)
+ if err != nil {
+ return nil, err
+ }
+ return &Response{
+ StatusCode: res.StatusCode,
+ Body: body,
+ Headers: &res.Header,
+ }, nil
+}
+
+func (h *httpBackend) Limit(rule *LimitRule) error {
+ h.lock.Lock()
+ if h.LimitRules == nil {
+ h.LimitRules = make([]*LimitRule, 0, 8)
+ }
+ h.LimitRules = append(h.LimitRules, rule)
+ h.lock.Unlock()
+ return rule.Init()
+}
+
+func (h *httpBackend) Limits(rules []*LimitRule) error {
+ for _, r := range rules {
+ if err := h.Limit(r); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gocolly/colly/request.go b/vendor/github.com/gocolly/colly/request.go
new file mode 100644
index 0000000..4b94cd2
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/request.go
@@ -0,0 +1,180 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync/atomic"
+)
+
+// Request is the representation of a HTTP request made by a Collector
+type Request struct {
+ // URL is the parsed URL of the HTTP request
+ URL *url.URL
+ // Headers contains the Request's HTTP headers
+ Headers *http.Header
+ // Ctx is a context between a Request and a Response
+ Ctx *Context
+ // Depth is the number of the parents of the request
+ Depth int
+ // Method is the HTTP method of the request
+ Method string
+ // Body is the request body which is used on POST/PUT requests
+ Body io.Reader
+ // ResponseCharacterEncoding is the character encoding of the response body.
+ // Leave it blank to allow automatic character encoding of the response body.
+ // It is empty by default and it can be set in OnRequest callback.
+ ResponseCharacterEncoding string
+ // ID is the Unique identifier of the request
+ ID uint32
+ collector *Collector
+ abort bool
+ baseURL *url.URL
+ // ProxyURL is the proxy address that handles the request
+ ProxyURL string
+}
+
+type serializableRequest struct {
+ URL string
+ Method string
+ Body []byte
+ ID uint32
+ Ctx map[string]interface{}
+ Headers http.Header
+}
+
+// New creates a new request with the context of the original request
+func (r *Request) New(method, URL string, body io.Reader) (*Request, error) {
+ u, err := url.Parse(URL)
+ if err != nil {
+ return nil, err
+ }
+ return &Request{
+ Method: method,
+ URL: u,
+ Body: body,
+ Ctx: r.Ctx,
+ Headers: &http.Header{},
+ ID: atomic.AddUint32(&r.collector.requestCount, 1),
+ collector: r.collector,
+ }, nil
+}
+
+// Abort cancels the HTTP request when called in an OnRequest callback
+func (r *Request) Abort() {
+ r.abort = true
+}
+
+// AbsoluteURL returns with the resolved absolute URL of an URL chunk.
+// AbsoluteURL returns empty string if the URL chunk is a fragment or
+// could not be parsed
+func (r *Request) AbsoluteURL(u string) string {
+ if strings.HasPrefix(u, "#") {
+ return ""
+ }
+ var base *url.URL
+ if r.baseURL != nil {
+ base = r.baseURL
+ } else {
+ base = r.URL
+ }
+ absURL, err := base.Parse(u)
+ if err != nil {
+ return ""
+ }
+ absURL.Fragment = ""
+ if absURL.Scheme == "//" {
+ absURL.Scheme = r.URL.Scheme
+ }
+ return absURL.String()
+}
+
+// Visit continues Collector's collecting job by creating a
+// request and preserves the Context of the previous request.
+// Visit also calls the previously provided callbacks
+func (r *Request) Visit(URL string) error {
+ return r.collector.scrape(r.AbsoluteURL(URL), "GET", r.Depth+1, nil, r.Ctx, nil, true)
+}
+
+// Post continues a collector job by creating a POST request and preserves the Context
+// of the previous request.
+// Post also calls the previously provided callbacks
+func (r *Request) Post(URL string, requestData map[string]string) error {
+ return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, createFormReader(requestData), r.Ctx, nil, true)
+}
+
+// PostRaw starts a collector job by creating a POST request with raw binary data.
+// PostRaw preserves the Context of the previous request
+// and calls the previously provided callbacks
+func (r *Request) PostRaw(URL string, requestData []byte) error {
+ return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, bytes.NewReader(requestData), r.Ctx, nil, true)
+}
+
+// PostMultipart starts a collector job by creating a Multipart POST request
+// with raw binary data. PostMultipart also calls the previously provided
+// callbacks.
+func (r *Request) PostMultipart(URL string, requestData map[string][]byte) error {
+ boundary := randomBoundary()
+ hdr := http.Header{}
+ hdr.Set("Content-Type", "multipart/form-data; boundary="+boundary)
+ hdr.Set("User-Agent", r.collector.UserAgent)
+ return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, createMultipartReader(boundary, requestData), r.Ctx, hdr, true)
+}
+
+// Retry submits HTTP request again with the same parameters
+func (r *Request) Retry() error {
+ return r.collector.scrape(r.URL.String(), r.Method, r.Depth, r.Body, r.Ctx, *r.Headers, false)
+}
+
+// Do submits the request
+func (r *Request) Do() error {
+ return r.collector.scrape(r.URL.String(), r.Method, r.Depth, r.Body, r.Ctx, *r.Headers, !r.collector.AllowURLRevisit)
+}
+
+// Marshal serializes the Request
+func (r *Request) Marshal() ([]byte, error) {
+ ctx := make(map[string]interface{})
+ if r.Ctx != nil {
+ r.Ctx.ForEach(func(k string, v interface{}) interface{} {
+ ctx[k] = v
+ return nil
+ })
+ }
+ var err error
+ var body []byte
+ if r.Body != nil {
+ body, err = ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ sr := &serializableRequest{
+ URL: r.URL.String(),
+ Method: r.Method,
+ Body: body,
+ ID: r.ID,
+ Ctx: ctx,
+ }
+ if r.Headers != nil {
+ sr.Headers = *r.Headers
+ }
+ return json.Marshal(sr)
+}
diff --git a/vendor/github.com/gocolly/colly/response.go b/vendor/github.com/gocolly/colly/response.go
new file mode 100644
index 0000000..11b9a63
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/response.go
@@ -0,0 +1,112 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "strings"
+
+ "github.com/saintfish/chardet"
+ "golang.org/x/net/html/charset"
+)
+
+// Response is the representation of a HTTP response made by a Collector
+type Response struct {
+ // StatusCode is the status code of the Response
+ StatusCode int
+ // Body is the content of the Response
+ Body []byte
+ // Ctx is a context between a Request and a Response
+ Ctx *Context
+ // Request is the Request object of the response
+ Request *Request
+ // Headers contains the Response's HTTP headers
+ Headers *http.Header
+}
+
+// Save writes response body to disk
+func (r *Response) Save(fileName string) error {
+ return ioutil.WriteFile(fileName, r.Body, 0644)
+}
+
+// FileName returns the sanitized file name parsed from "Content-Disposition"
+// header or from URL
+func (r *Response) FileName() string {
+ _, params, err := mime.ParseMediaType(r.Headers.Get("Content-Disposition"))
+ if fName, ok := params["filename"]; ok && err == nil {
+ return SanitizeFileName(fName)
+ }
+ if r.Request.URL.RawQuery != "" {
+ return SanitizeFileName(fmt.Sprintf("%s_%s", r.Request.URL.Path, r.Request.URL.RawQuery))
+ }
+ return SanitizeFileName(strings.TrimPrefix(r.Request.URL.Path, "/"))
+}
+
+func (r *Response) fixCharset(detectCharset bool, defaultEncoding string) error {
+ if len(r.Body) == 0 {
+ return nil
+ }
+ if defaultEncoding != "" {
+ tmpBody, err := encodeBytes(r.Body, "text/plain; charset="+defaultEncoding)
+ if err != nil {
+ return err
+ }
+ r.Body = tmpBody
+ return nil
+ }
+ contentType := strings.ToLower(r.Headers.Get("Content-Type"))
+
+ if strings.Contains(contentType, "image/") ||
+ strings.Contains(contentType, "video/") ||
+ strings.Contains(contentType, "audio/") ||
+ strings.Contains(contentType, "font/") {
+ // These MIME types should not have textual data.
+
+ return nil
+ }
+
+ if !strings.Contains(contentType, "charset") {
+ if !detectCharset {
+ return nil
+ }
+ d := chardet.NewTextDetector()
+ r, err := d.DetectBest(r.Body)
+ if err != nil {
+ return err
+ }
+ contentType = "text/plain; charset=" + r.Charset
+ }
+ if strings.Contains(contentType, "utf-8") || strings.Contains(contentType, "utf8") {
+ return nil
+ }
+ tmpBody, err := encodeBytes(r.Body, contentType)
+ if err != nil {
+ return err
+ }
+ r.Body = tmpBody
+ return nil
+}
+
+func encodeBytes(b []byte, contentType string) ([]byte, error) {
+ r, err := charset.NewReader(bytes.NewReader(b), contentType)
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(r)
+}
diff --git a/vendor/github.com/gocolly/colly/storage/storage.go b/vendor/github.com/gocolly/colly/storage/storage.go
new file mode 100644
index 0000000..fcb0c0c
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/storage/storage.go
@@ -0,0 +1,128 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "net/http"
+ "net/http/cookiejar"
+ "net/url"
+ "strings"
+ "sync"
+)
+
+// Storage is an interface which handles Collector's internal data,
+// like visited urls and cookies.
+// The default Storage of the Collector is the InMemoryStorage.
+// Collector's storage can be changed by calling Collector.SetStorage()
+// function.
+type Storage interface {
+ // Init initializes the storage
+ Init() error
+ // Visited receives and stores a request ID that is visited by the Collector
+ Visited(requestID uint64) error
+ // IsVisited returns true if the request was visited before IsVisited
+ // is called
+ IsVisited(requestID uint64) (bool, error)
+ // Cookies retrieves stored cookies for a given host
+ Cookies(u *url.URL) string
+ // SetCookies stores cookies for a given host
+ SetCookies(u *url.URL, cookies string)
+}
+
+// InMemoryStorage is the default storage backend of colly.
+// InMemoryStorage keeps cookies and visited urls in memory
+// without persisting data on the disk.
+type InMemoryStorage struct {
+ visitedURLs map[uint64]bool
+ lock *sync.RWMutex
+ jar *cookiejar.Jar
+}
+
+// Init initializes InMemoryStorage
+func (s *InMemoryStorage) Init() error {
+ if s.visitedURLs == nil {
+ s.visitedURLs = make(map[uint64]bool)
+ }
+ if s.lock == nil {
+ s.lock = &sync.RWMutex{}
+ }
+ if s.jar == nil {
+ var err error
+ s.jar, err = cookiejar.New(nil)
+ return err
+ }
+ return nil
+}
+
+// Visited implements Storage.Visited()
+func (s *InMemoryStorage) Visited(requestID uint64) error {
+ s.lock.Lock()
+ s.visitedURLs[requestID] = true
+ s.lock.Unlock()
+ return nil
+}
+
+// IsVisited implements Storage.IsVisited()
+func (s *InMemoryStorage) IsVisited(requestID uint64) (bool, error) {
+ s.lock.RLock()
+ visited := s.visitedURLs[requestID]
+ s.lock.RUnlock()
+ return visited, nil
+}
+
+// Cookies implements Storage.Cookies()
+func (s *InMemoryStorage) Cookies(u *url.URL) string {
+ return StringifyCookies(s.jar.Cookies(u))
+}
+
+// SetCookies implements Storage.SetCookies()
+func (s *InMemoryStorage) SetCookies(u *url.URL, cookies string) {
+ s.jar.SetCookies(u, UnstringifyCookies(cookies))
+}
+
+// Close implements Storage.Close()
+func (s *InMemoryStorage) Close() error {
+ return nil
+}
+
+// StringifyCookies serializes list of http.Cookies to string
+func StringifyCookies(cookies []*http.Cookie) string {
+ // Stringify cookies.
+ cs := make([]string, len(cookies))
+ for i, c := range cookies {
+ cs[i] = c.String()
+ }
+ return strings.Join(cs, "\n")
+}
+
+// UnstringifyCookies deserializes a cookie string to http.Cookies
+func UnstringifyCookies(s string) []*http.Cookie {
+ h := http.Header{}
+ for _, c := range strings.Split(s, "\n") {
+ h.Add("Set-Cookie", c)
+ }
+ r := http.Response{Header: h}
+ return r.Cookies()
+}
+
+// ContainsCookie checks if a cookie name is represented in cookies
+func ContainsCookie(cookies []*http.Cookie, name string) bool {
+ for _, c := range cookies {
+ if c.Name == name {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/gocolly/colly/unmarshal.go b/vendor/github.com/gocolly/colly/unmarshal.go
new file mode 100644
index 0000000..302f258
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/unmarshal.go
@@ -0,0 +1,218 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+
+ "github.com/PuerkitoBio/goquery"
+)
+
+// Unmarshal is a shorthand for colly.UnmarshalHTML
+func (h *HTMLElement) Unmarshal(v interface{}) error {
+ return UnmarshalHTML(v, h.DOM, nil)
+}
+
+// UnmarshalWithMap is a shorthand for colly.UnmarshalHTML, extended to allow maps to be passed in.
+func (h *HTMLElement) UnmarshalWithMap(v interface{}, structMap map[string]string) error {
+ return UnmarshalHTML(v, h.DOM, structMap)
+}
+
+// UnmarshalHTML declaratively extracts text or attributes to a struct from
+// HTML response using struct tags composed of css selectors.
+// Allowed struct tags:
+// - "selector" (required): CSS (goquery) selector of the desired data
+// - "attr" (optional): Selects the matching element's attribute's value.
+// Leave it blank or omit to get the text of the element.
+//
+// Example struct declaration:
+//
+// type Nested struct {
+// String string `selector:"div > p"`
+// Classes []string `selector:"li" attr:"class"`
+// Struct *Nested `selector:"div > div"`
+// }
+//
+// Supported types: struct, *struct, string, []string
+func UnmarshalHTML(v interface{}, s *goquery.Selection, structMap map[string]string) error {
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return errors.New("Invalid type or nil-pointer")
+ }
+
+ sv := rv.Elem()
+ st := reflect.TypeOf(v).Elem()
+ if structMap != nil {
+ for k, v := range structMap {
+ attrV := sv.FieldByName(k)
+ if !attrV.CanAddr() || !attrV.CanSet() {
+ continue
+ }
+ if err := unmarshalSelector(s, attrV, v); err != nil {
+ return err
+ }
+ }
+ } else {
+ for i := 0; i < sv.NumField(); i++ {
+ attrV := sv.Field(i)
+ if !attrV.CanAddr() || !attrV.CanSet() {
+ continue
+ }
+ if err := unmarshalAttr(s, attrV, st.Field(i)); err != nil {
+ return err
+ }
+
+ }
+ }
+
+ return nil
+}
+
+func unmarshalSelector(s *goquery.Selection, attrV reflect.Value, selector string) error {
+ //selector is "-" specify that field should ignore.
+ if selector == "-" {
+ return nil
+ }
+ htmlAttr := ""
+ // TODO support more types
+ switch attrV.Kind() {
+ case reflect.Slice:
+ if err := unmarshalSlice(s, selector, htmlAttr, attrV); err != nil {
+ return err
+ }
+ case reflect.String:
+ val := getDOMValue(s.Find(selector), htmlAttr)
+ attrV.Set(reflect.Indirect(reflect.ValueOf(val)))
+ case reflect.Struct:
+ if err := unmarshalStruct(s, selector, attrV); err != nil {
+ return err
+ }
+ case reflect.Ptr:
+ if err := unmarshalPtr(s, selector, attrV); err != nil {
+ return err
+ }
+ default:
+ return errors.New("Invalid type: " + attrV.String())
+ }
+ return nil
+}
+
+func unmarshalAttr(s *goquery.Selection, attrV reflect.Value, attrT reflect.StructField) error {
+ selector := attrT.Tag.Get("selector")
+ //selector is "-" specify that field should ignore.
+ if selector == "-" {
+ return nil
+ }
+ htmlAttr := attrT.Tag.Get("attr")
+ // TODO support more types
+ switch attrV.Kind() {
+ case reflect.Slice:
+ if err := unmarshalSlice(s, selector, htmlAttr, attrV); err != nil {
+ return err
+ }
+ case reflect.String:
+ val := getDOMValue(s.Find(selector), htmlAttr)
+ attrV.Set(reflect.Indirect(reflect.ValueOf(val)))
+ case reflect.Struct:
+ if err := unmarshalStruct(s, selector, attrV); err != nil {
+ return err
+ }
+ case reflect.Ptr:
+ if err := unmarshalPtr(s, selector, attrV); err != nil {
+ return err
+ }
+ default:
+ return errors.New("Invalid type: " + attrV.String())
+ }
+ return nil
+}
+
+func unmarshalStruct(s *goquery.Selection, selector string, attrV reflect.Value) error {
+ newS := s
+ if selector != "" {
+ newS = newS.Find(selector)
+ }
+ if newS.Nodes == nil {
+ return nil
+ }
+ v := reflect.New(attrV.Type())
+ err := UnmarshalHTML(v.Interface(), newS, nil)
+ if err != nil {
+ return err
+ }
+ attrV.Set(reflect.Indirect(v))
+ return nil
+}
+
+func unmarshalPtr(s *goquery.Selection, selector string, attrV reflect.Value) error {
+ newS := s
+ if selector != "" {
+ newS = newS.Find(selector)
+ }
+ if newS.Nodes == nil {
+ return nil
+ }
+ e := attrV.Type().Elem()
+ if e.Kind() != reflect.Struct {
+ return errors.New("Invalid slice type")
+ }
+ v := reflect.New(e)
+ err := UnmarshalHTML(v.Interface(), newS, nil)
+ if err != nil {
+ return err
+ }
+ attrV.Set(v)
+ return nil
+}
+
+func unmarshalSlice(s *goquery.Selection, selector, htmlAttr string, attrV reflect.Value) error {
+ if attrV.Pointer() == 0 {
+ v := reflect.MakeSlice(attrV.Type(), 0, 0)
+ attrV.Set(v)
+ }
+ switch attrV.Type().Elem().Kind() {
+ case reflect.String:
+ s.Find(selector).Each(func(_ int, s *goquery.Selection) {
+ val := getDOMValue(s, htmlAttr)
+ attrV.Set(reflect.Append(attrV, reflect.Indirect(reflect.ValueOf(val))))
+ })
+ case reflect.Ptr:
+ s.Find(selector).Each(func(_ int, innerSel *goquery.Selection) {
+ someVal := reflect.New(attrV.Type().Elem().Elem())
+ UnmarshalHTML(someVal.Interface(), innerSel, nil)
+ attrV.Set(reflect.Append(attrV, someVal))
+ })
+ case reflect.Struct:
+ s.Find(selector).Each(func(_ int, innerSel *goquery.Selection) {
+ someVal := reflect.New(attrV.Type().Elem())
+ UnmarshalHTML(someVal.Interface(), innerSel, nil)
+ attrV.Set(reflect.Append(attrV, reflect.Indirect(someVal)))
+ })
+ default:
+ return errors.New("Invalid slice type")
+ }
+ return nil
+}
+
+func getDOMValue(s *goquery.Selection, attr string) string {
+ if attr == "" {
+ return strings.TrimSpace(s.First().Text())
+ }
+ attrV, _ := s.Attr(attr)
+ return attrV
+}
diff --git a/vendor/github.com/gocolly/colly/unmarshal_test.go b/vendor/github.com/gocolly/colly/unmarshal_test.go
new file mode 100644
index 0000000..72aab29
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/unmarshal_test.go
@@ -0,0 +1,163 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/PuerkitoBio/goquery"
+)
+
+var basicTestData = []byte(`
list item 1
list item 2
3
`)
+var nestedTestData = []byte(`
a
b
c
`)
+var pointerSliceTestData = []byte(`
Information: Info 1
Information: Info 2
`)
+
+func TestBasicUnmarshal(t *testing.T) {
+ doc, _ := goquery.NewDocumentFromReader(bytes.NewBuffer(basicTestData))
+ e := &HTMLElement{
+ DOM: doc.First(),
+ }
+ s := struct {
+ String string `selector:"li:first-child" attr:"class"`
+ Items []string `selector:"li"`
+ Struct struct {
+ String string `selector:"li:last-child"`
+ }
+ }{}
+ if err := e.Unmarshal(&s); err != nil {
+ t.Error("Cannot unmarshal struct: " + err.Error())
+ }
+ if s.String != "x" {
+ t.Errorf(`Invalid data for String: %q, expected "x"`, s.String)
+ }
+ if s.Struct.String != "3" {
+ t.Errorf(`Invalid data for Struct.String: %q, expected "3"`, s.Struct.String)
+ }
+}
+
+func TestNestedUnmarshalMap(t *testing.T) {
+ doc, _ := goquery.NewDocumentFromReader(bytes.NewBuffer(nestedTestData))
+ e := &HTMLElement{
+ DOM: doc.First(),
+ }
+ doc2, _ := goquery.NewDocumentFromReader(bytes.NewBuffer(basicTestData))
+ e2 := &HTMLElement{
+ DOM: doc2.First(),
+ }
+ type nested struct {
+ String string
+ }
+ mapSelector := make(map[string]string)
+ mapSelector["String"] = "div > p"
+
+ mapSelector2 := make(map[string]string)
+ mapSelector2["String"] = "span"
+
+ s := nested{}
+ s2 := nested{}
+ if err := e.UnmarshalWithMap(&s, mapSelector); err != nil {
+ t.Error("Cannot unmarshal struct: " + err.Error())
+ }
+ if err := e2.UnmarshalWithMap(&s2, mapSelector2); err != nil {
+ t.Error("Cannot unmarshal struct: " + err.Error())
+ }
+ if s.String != "a" {
+ t.Errorf(`Invalid data for String: %q, expected "a"`, s.String)
+ }
+ if s2.String != "item" {
+ t.Errorf(`Invalid data for String: %q, expected "a"`, s.String)
+ }
+}
+
+func TestNestedUnmarshal(t *testing.T) {
+ doc, _ := goquery.NewDocumentFromReader(bytes.NewBuffer(nestedTestData))
+ e := &HTMLElement{
+ DOM: doc.First(),
+ }
+ type nested struct {
+ String string `selector:"div > p"`
+ Struct *nested `selector:"div > div"`
+ }
+ s := nested{}
+ if err := e.Unmarshal(&s); err != nil {
+ t.Error("Cannot unmarshal struct: " + err.Error())
+ }
+ if s.String != "a" {
+ t.Errorf(`Invalid data for String: %q, expected "a"`, s.String)
+ }
+ if s.Struct.String != "b" {
+ t.Errorf(`Invalid data for Struct.String: %q, expected "b"`, s.Struct.String)
+ }
+ if s.Struct.Struct.String != "c" {
+ t.Errorf(`Invalid data for Struct.Struct.String: %q, expected "c"`, s.Struct.Struct.String)
+ }
+}
+
+func TestPointerSliceUnmarshall(t *testing.T) {
+ type info struct {
+ Text string `selector:"span"`
+ }
+ type object struct {
+ Info []*info `selector:"li.info"`
+ }
+
+ doc, _ := goquery.NewDocumentFromReader(bytes.NewBuffer(pointerSliceTestData))
+ e := HTMLElement{DOM: doc.First()}
+ o := object{}
+ err := e.Unmarshal(&o)
+ if err != nil {
+ t.Fatalf("Failed to unmarshal page: %s\n", err.Error())
+ }
+
+ if len(o.Info) != 2 {
+ t.Errorf("Invalid length for Info: %d, expected 2", len(o.Info))
+ }
+ if o.Info[0].Text != "Info 1" {
+ t.Errorf("Invalid data for Info.[0].Text: %s, expected Info 1", o.Info[0].Text)
+ }
+ if o.Info[1].Text != "Info 2" {
+ t.Errorf("Invalid data for Info.[1].Text: %s, expected Info 2", o.Info[1].Text)
+ }
+
+}
+
+func TestStructSliceUnmarshall(t *testing.T) {
+ type info struct {
+ Text string `selector:"span"`
+ }
+ type object struct {
+ Info []info `selector:"li.info"`
+ }
+
+ doc, _ := goquery.NewDocumentFromReader(bytes.NewBuffer(pointerSliceTestData))
+ e := HTMLElement{DOM: doc.First()}
+ o := object{}
+ err := e.Unmarshal(&o)
+ if err != nil {
+ t.Fatalf("Failed to unmarshal page: %s\n", err.Error())
+ }
+
+ if len(o.Info) != 2 {
+ t.Errorf("Invalid length for Info: %d, expected 2", len(o.Info))
+ }
+ if o.Info[0].Text != "Info 1" {
+ t.Errorf("Invalid data for Info.[0].Text: %s, expected Info 1", o.Info[0].Text)
+ }
+ if o.Info[1].Text != "Info 2" {
+ t.Errorf("Invalid data for Info.[1].Text: %s, expected Info 2", o.Info[1].Text)
+ }
+
+}
diff --git a/vendor/github.com/gocolly/colly/xmlelement.go b/vendor/github.com/gocolly/colly/xmlelement.go
new file mode 100644
index 0000000..7ff5fe5
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/xmlelement.go
@@ -0,0 +1,170 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly
+
+import (
+ "encoding/xml"
+ "strings"
+
+ "github.com/antchfx/htmlquery"
+ "github.com/antchfx/xmlquery"
+ "golang.org/x/net/html"
+)
+
+// XMLElement is the representation of an XML tag.
+type XMLElement struct {
+ // Name is the name of the tag
+ Name string
+ Text string
+ attributes interface{}
+ // Request is the request object of the element's HTML document
+ Request *Request
+ // Response is the Response object of the element's HTML document
+ Response *Response
+ // DOM is the DOM object of the page. DOM is relative
+ // to the current XMLElement and is either a html.Node or xmlquery.Node
+ // based on how the XMLElement was created.
+ DOM interface{}
+ isHTML bool
+}
+
+// NewXMLElementFromHTMLNode creates a XMLElement from a html.Node.
+func NewXMLElementFromHTMLNode(resp *Response, s *html.Node) *XMLElement {
+ return &XMLElement{
+ Name: s.Data,
+ Request: resp.Request,
+ Response: resp,
+ Text: htmlquery.InnerText(s),
+ DOM: s,
+ attributes: s.Attr,
+ isHTML: true,
+ }
+}
+
+// NewXMLElementFromXMLNode creates a XMLElement from a xmlquery.Node.
+func NewXMLElementFromXMLNode(resp *Response, s *xmlquery.Node) *XMLElement {
+ return &XMLElement{
+ Name: s.Data,
+ Request: resp.Request,
+ Response: resp,
+ Text: s.InnerText(),
+ DOM: s,
+ attributes: s.Attr,
+ isHTML: false,
+ }
+}
+
+// Attr returns the selected attribute of an XMLElement, or an empty string
+// if no matching attribute is found.
+func (h *XMLElement) Attr(k string) string {
+ if h.isHTML {
+ for _, a := range h.attributes.([]html.Attribute) {
+ if a.Key == k {
+ return a.Val
+ }
+ }
+ } else {
+ for _, a := range h.attributes.([]xml.Attr) {
+ if a.Name.Local == k {
+ return a.Value
+ }
+ }
+ }
+ return ""
+}
+
+// ChildText returns the concatenated and stripped text content of the matching
+// elements.
+func (h *XMLElement) ChildText(xpathQuery string) string {
+ if h.isHTML {
+ child := htmlquery.FindOne(h.DOM.(*html.Node), xpathQuery)
+ if child == nil {
+ return ""
+ }
+ return strings.TrimSpace(htmlquery.InnerText(child))
+ }
+ child := xmlquery.FindOne(h.DOM.(*xmlquery.Node), xpathQuery)
+ if child == nil {
+ return ""
+ }
+ return strings.TrimSpace(child.InnerText())
+
+}
+
+// ChildAttr returns the stripped text content of the first matching
+// element's attribute.
+func (h *XMLElement) ChildAttr(xpathQuery, attrName string) string {
+ if h.isHTML {
+ child := htmlquery.FindOne(h.DOM.(*html.Node), xpathQuery)
+ if child != nil {
+ for _, attr := range child.Attr {
+ if attr.Key == attrName {
+ return strings.TrimSpace(attr.Val)
+ }
+ }
+ }
+ } else {
+ child := xmlquery.FindOne(h.DOM.(*xmlquery.Node), xpathQuery)
+ if child != nil {
+ for _, attr := range child.Attr {
+ if attr.Name.Local == attrName {
+ return strings.TrimSpace(attr.Value)
+ }
+ }
+ }
+ }
+
+ return ""
+}
+
+// ChildAttrs returns the stripped text content of all the matching
+// element's attributes.
+func (h *XMLElement) ChildAttrs(xpathQuery, attrName string) []string {
+ var res []string
+ if h.isHTML {
+ for _, child := range htmlquery.Find(h.DOM.(*html.Node), xpathQuery) {
+ for _, attr := range child.Attr {
+ if attr.Key == attrName {
+ res = append(res, strings.TrimSpace(attr.Val))
+ }
+ }
+ }
+ } else {
+ xmlquery.FindEach(h.DOM.(*xmlquery.Node), xpathQuery, func(i int, child *xmlquery.Node) {
+ for _, attr := range child.Attr {
+ if attr.Name.Local == attrName {
+ res = append(res, strings.TrimSpace(attr.Value))
+ }
+ }
+ })
+ }
+ return res
+}
+
+// ChildTexts returns an array of strings corresponding to child elements that match the xpath query.
+// Each item in the array is the stripped text content of the corresponding matching child element.
+func (h *XMLElement) ChildTexts(xpathQuery string) []string {
+ texts := make([]string, 0)
+ if h.isHTML {
+ for _, child := range htmlquery.Find(h.DOM.(*html.Node), xpathQuery) {
+ texts = append(texts, strings.TrimSpace(htmlquery.InnerText(child)))
+ }
+ } else {
+ xmlquery.FindEach(h.DOM.(*xmlquery.Node), xpathQuery, func(i int, child *xmlquery.Node) {
+ texts = append(texts, strings.TrimSpace(child.InnerText()))
+ })
+ }
+ return texts
+}
diff --git a/vendor/github.com/gocolly/colly/xmlelement_test.go b/vendor/github.com/gocolly/colly/xmlelement_test.go
new file mode 100644
index 0000000..ac7a1ae
--- /dev/null
+++ b/vendor/github.com/gocolly/colly/xmlelement_test.go
@@ -0,0 +1,123 @@
+// Copyright 2018 Adam Tauber
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package colly_test
+
+import (
+ "github.com/antchfx/htmlquery"
+ "github.com/gocolly/colly"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+// Borrowed from http://infohost.nmt.edu/tcc/help/pubs/xhtml/example.html
+// Added class attributes to the `<li>` tags for testing purposes
+// (the tests below assert on those class attribute values).
+const htmlPage = `
+
+
+
+ Your page title here
+
+
+
Your major heading here
+
+ This is a regular text paragraph.
+
+
+
+ First bullet of a bullet list.
+
+
+ This is the second bullet.
+
+
+
+
+`
+
+func TestAttr(t *testing.T) {
+ resp := &colly.Response{StatusCode: 200, Body: []byte(htmlPage)}
+ doc, _ := htmlquery.Parse(strings.NewReader(htmlPage))
+ xmlNode := htmlquery.FindOne(doc, "/html")
+ xmlElem := colly.NewXMLElementFromHTMLNode(resp, xmlNode)
+
+ if xmlElem.Attr("xmlns") != "http://www.w3.org/1999/xhtml" {
+ t.Fatalf("failed xmlns attribute test: %v != http://www.w3.org/1999/xhtml", xmlElem.Attr("xmlns"))
+ }
+
+ if xmlElem.Attr("xml:lang") != "en" {
+ t.Fatalf("failed lang attribute test: %v != en", xmlElem.Attr("lang"))
+ }
+}
+
+func TestChildText(t *testing.T) {
+ resp := &colly.Response{StatusCode: 200, Body: []byte(htmlPage)}
+ doc, _ := htmlquery.Parse(strings.NewReader(htmlPage))
+ xmlNode := htmlquery.FindOne(doc, "/html")
+ xmlElem := colly.NewXMLElementFromHTMLNode(resp, xmlNode)
+
+ if text := xmlElem.ChildText("//p"); text != "This is a regular text paragraph." {
+ t.Fatalf("failed child tag test: %v != This is a regular text paragraph.", text)
+ }
+ if text := xmlElem.ChildText("//dl"); text != "" {
+ t.Fatalf("failed child tag test: %v != \"\"", text)
+ }
+}
+
+func TestChildTexts(t *testing.T) {
+ resp := &colly.Response{StatusCode: 200, Body: []byte(htmlPage)}
+ doc, _ := htmlquery.Parse(strings.NewReader(htmlPage))
+ xmlNode := htmlquery.FindOne(doc, "/html")
+ xmlElem := colly.NewXMLElementFromHTMLNode(resp, xmlNode)
+ expected := []string{"First bullet of a bullet list.", "This is the second bullet."}
+ if texts := xmlElem.ChildTexts("//li"); reflect.DeepEqual(texts, expected) == false {
+ t.Fatalf("failed child tags test: %v != %v", texts, expected)
+ }
+ if texts := xmlElem.ChildTexts("//dl"); reflect.DeepEqual(texts, make([]string, 0)) == false {
+ t.Fatalf("failed child tag test: %v != \"\"", texts)
+ }
+}
+func TestChildAttr(t *testing.T) {
+ resp := &colly.Response{StatusCode: 200, Body: []byte(htmlPage)}
+ doc, _ := htmlquery.Parse(strings.NewReader(htmlPage))
+ xmlNode := htmlquery.FindOne(doc, "/html")
+ xmlElem := colly.NewXMLElementFromHTMLNode(resp, xmlNode)
+
+ if attr := xmlElem.ChildAttr("/body/ul/li[1]", "class"); attr != "list-item-1" {
+ t.Fatalf("failed child attribute test: %v != list-item-1", attr)
+ }
+ if attr := xmlElem.ChildAttr("/body/ul/li[2]", "class"); attr != "list-item-2" {
+ t.Fatalf("failed child attribute test: %v != list-item-2", attr)
+ }
+}
+
+func TestChildAttrs(t *testing.T) {
+ resp := &colly.Response{StatusCode: 200, Body: []byte(htmlPage)}
+ doc, _ := htmlquery.Parse(strings.NewReader(htmlPage))
+ xmlNode := htmlquery.FindOne(doc, "/html")
+ xmlElem := colly.NewXMLElementFromHTMLNode(resp, xmlNode)
+
+ attrs := xmlElem.ChildAttrs("/body/ul/li", "class")
+ if len(attrs) != 2 {
+ t.Fatalf("failed child attributes length test: %d != 2", len(attrs))
+ }
+
+ for _, attr := range attrs {
+ if !(attr == "list-item-1" || attr == "list-item-2") {
+ t.Fatalf("failed child attributes values test: %s != list-item-(1 or 2)", attr)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/lint/.travis.yml b/vendor/github.com/golang/lint/.travis.yml
new file mode 100644
index 0000000..47af085
--- /dev/null
+++ b/vendor/github.com/golang/lint/.travis.yml
@@ -0,0 +1,18 @@
+sudo: false
+language: go
+go:
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - master
+
+install:
+ - go get -t -v ./...
+
+script:
+ - go test -v -race ./...
+
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
diff --git a/vendor/github.com/golang/lint/CONTRIBUTING.md b/vendor/github.com/golang/lint/CONTRIBUTING.md
new file mode 100644
index 0000000..971da12
--- /dev/null
+++ b/vendor/github.com/golang/lint/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to Golint
+
+## Before filing an issue:
+
+### Are you having trouble building golint?
+
+Check you have the latest version of its dependencies. Run
+```
+go get -u github.com/golang/lint
+```
+If you still have problems, consider searching for existing issues before filing a new issue.
+
+## Before sending a pull request:
+
+Have you understood the purpose of golint? Make sure to carefully read `README`.
diff --git a/vendor/github.com/golang/lint/LICENSE b/vendor/github.com/golang/lint/LICENSE
new file mode 100644
index 0000000..65d761b
--- /dev/null
+++ b/vendor/github.com/golang/lint/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/lint/README.md b/vendor/github.com/golang/lint/README.md
new file mode 100644
index 0000000..3593ddd
--- /dev/null
+++ b/vendor/github.com/golang/lint/README.md
@@ -0,0 +1,82 @@
+Golint is a linter for Go source code.
+
+[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)
+
+## Installation
+
+Golint requires Go 1.6 or later.
+
+ go get -u github.com/golang/lint/golint
+
+## Usage
+
+Invoke `golint` with one or more filenames, directories, or packages named
+by its import path. Golint uses the same
+[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
+the `go` command and therefore
+also supports relative import paths like `./...`. Additionally the `...`
+wildcard can be used as suffix on relative and absolute file paths to recurse
+into them.
+
+The output of this tool is a list of suggestions in Vim quickfix format,
+which is accepted by lots of different editors.
+
+## Purpose
+
+Golint differs from gofmt. Gofmt reformats Go source code, whereas
+golint prints out style mistakes.
+
+Golint differs from govet. Govet is concerned with correctness, whereas
+golint is concerned with coding style. Golint is in use at Google, and it
+seeks to match the accepted style of the open source Go project.
+
+The suggestions made by golint are exactly that: suggestions.
+Golint is not perfect, and has both false positives and false negatives.
+Do not treat its output as a gold standard. We will not be adding pragmas
+or other knobs to suppress specific warnings, so do not expect or require
+code to be completely "lint-free".
+In short, this tool is not, and will never be, trustworthy enough for its
+suggestions to be enforced automatically, for example as part of a build process.
+Golint makes suggestions for many of the mechanically checkable items listed in
+[Effective Go](https://golang.org/doc/effective_go.html) and the
+[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
+
+If you find an established style that is frequently violated, and which
+you think golint could statically check,
+[file an issue](https://github.com/golang/lint/issues).
+
+## Contributions
+
+Contributions to this project are welcome, though please send mail before
+starting work on anything major. Contributors retain their copyright, so we
+need you to fill out
+[a short form](https://developers.google.com/open-source/cla/individual)
+before we can accept your contribution.
+
+## Vim
+
+Add this to your ~/.vimrc:
+
+ set rtp+=$GOPATH/src/github.com/golang/lint/misc/vim
+
+If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
+
+Running `:Lint` will run golint on the current file and populate the quickfix list.
+
+Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`
+
+ autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
+
+
+## Emacs
+
+Add this to your `.emacs` file:
+
+ (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs"))
+ (require 'golint)
+
+If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
+
+Running M-x golint will run golint on the current file.
+
+For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).
diff --git a/vendor/github.com/golang/lint/golint/golint.go b/vendor/github.com/golang/lint/golint/golint.go
new file mode 100644
index 0000000..d8360ad
--- /dev/null
+++ b/vendor/github.com/golang/lint/golint/golint.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// golint lints the Go source files named on its command line.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/golang/lint"
+)
+
+var (
+ minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
+ setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found")
+ suggestions int
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n")
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n")
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n")
+ fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n")
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ lintDir(".")
+ } else {
+ // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to
+ // directory, file or package targets. The distinction affects which
+ // checks are run. It is not valid to mix target types.
+ var dirsRun, filesRun, pkgsRun int
+ var args []string
+ for _, arg := range flag.Args() {
+ if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) {
+ dirsRun = 1
+ for _, dirname := range allPackagesInFS(arg) {
+ args = append(args, dirname)
+ }
+ } else if isDir(arg) {
+ dirsRun = 1
+ args = append(args, arg)
+ } else if exists(arg) {
+ filesRun = 1
+ args = append(args, arg)
+ } else {
+ pkgsRun = 1
+ args = append(args, arg)
+ }
+ }
+
+ if dirsRun+filesRun+pkgsRun != 1 {
+ usage()
+ os.Exit(2)
+ }
+ switch {
+ case dirsRun == 1:
+ for _, dir := range args {
+ lintDir(dir)
+ }
+ case filesRun == 1:
+ lintFiles(args...)
+ case pkgsRun == 1:
+ for _, pkg := range importPaths(args) {
+ lintPackage(pkg)
+ }
+ }
+ }
+
+ if *setExitStatus && suggestions > 0 {
+ fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions)
+ os.Exit(1)
+ }
+}
+
+func isDir(filename string) bool {
+ fi, err := os.Stat(filename)
+ return err == nil && fi.IsDir()
+}
+
+func exists(filename string) bool {
+ _, err := os.Stat(filename)
+ return err == nil
+}
+
+func lintFiles(filenames ...string) {
+ files := make(map[string][]byte)
+ for _, filename := range filenames {
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ files[filename] = src
+ }
+
+ l := new(lint.Linter)
+ ps, err := l.LintFiles(files)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return
+ }
+ for _, p := range ps {
+ if p.Confidence >= *minConfidence {
+ fmt.Printf("%v: %s\n", p.Position, p.Text)
+ suggestions++
+ }
+ }
+}
+
+func lintDir(dirname string) {
+ pkg, err := build.ImportDir(dirname, 0)
+ lintImportedPackage(pkg, err)
+}
+
+func lintPackage(pkgname string) {
+ pkg, err := build.Import(pkgname, ".", 0)
+ lintImportedPackage(pkg, err)
+}
+
+func lintImportedPackage(pkg *build.Package, err error) {
+ if err != nil {
+ if _, nogo := err.(*build.NoGoError); nogo {
+ // Don't complain if the failure is due to no Go source files.
+ return
+ }
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+
+ var files []string
+ files = append(files, pkg.GoFiles...)
+ files = append(files, pkg.CgoFiles...)
+ files = append(files, pkg.TestGoFiles...)
+ if pkg.Dir != "." {
+ for i, f := range files {
+ files[i] = filepath.Join(pkg.Dir, f)
+ }
+ }
+ // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles)
+
+ lintFiles(files...)
+}
diff --git a/vendor/github.com/golang/lint/golint/import.go b/vendor/github.com/golang/lint/golint/import.go
new file mode 100644
index 0000000..02a0daa
--- /dev/null
+++ b/vendor/github.com/golang/lint/golint/import.go
@@ -0,0 +1,310 @@
+package main
+
+/*
+
+This file holds a direct copy of the import path matching code of
+https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be
+replaced when https://golang.org/issue/8768 is resolved.
+
+It has been updated to follow upstream changes in a few ways.
+
+*/
+
+import (
+ "fmt"
+ "go/build"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+var buildContext = build.Default
+
+var (
+ goroot = filepath.Clean(runtime.GOROOT())
+ gorootSrc = filepath.Join(goroot, "src")
+)
+
+// importPathsNoDotExpansion returns the import paths to use for the given
+// command line, but it does no ... expansion.
+func importPathsNoDotExpansion(args []string) []string {
+ if len(args) == 0 {
+ return []string{"."}
+ }
+ var out []string
+ for _, a := range args {
+ // Arguments are supposed to be import paths, but
+ // as a courtesy to Windows developers, rewrite \ to /
+ // in command-line arguments. Handles .\... and so on.
+ if filepath.Separator == '\\' {
+ a = strings.Replace(a, `\`, `/`, -1)
+ }
+
+ // Put argument in canonical form, but preserve leading ./.
+ if strings.HasPrefix(a, "./") {
+ a = "./" + path.Clean(a)
+ if a == "./." {
+ a = "."
+ }
+ } else {
+ a = path.Clean(a)
+ }
+ if a == "all" || a == "std" {
+ out = append(out, allPackages(a)...)
+ continue
+ }
+ out = append(out, a)
+ }
+ return out
+}
+
+// importPaths returns the import paths to use for the given command line.
+func importPaths(args []string) []string {
+ args = importPathsNoDotExpansion(args)
+ var out []string
+ for _, a := range args {
+ if strings.Contains(a, "...") {
+ if build.IsLocalImport(a) {
+ out = append(out, allPackagesInFS(a)...)
+ } else {
+ out = append(out, allPackages(a)...)
+ }
+ continue
+ }
+ out = append(out, a)
+ }
+ return out
+}
+
+// matchPattern(pattern)(name) reports whether
+// name matches pattern. Pattern is a limited glob
+// pattern in which '...' means 'any string' and there
+// is no other special syntax.
+func matchPattern(pattern string) func(name string) bool {
+ re := regexp.QuoteMeta(pattern)
+ re = strings.Replace(re, `\.\.\.`, `.*`, -1)
+ // Special case: foo/... matches foo too.
+ if strings.HasSuffix(re, `/.*`) {
+ re = re[:len(re)-len(`/.*`)] + `(/.*)?`
+ }
+ reg := regexp.MustCompile(`^` + re + `$`)
+ return func(name string) bool {
+ return reg.MatchString(name)
+ }
+}
+
+// hasPathPrefix reports whether the path s begins with the
+// elements in prefix.
+func hasPathPrefix(s, prefix string) bool {
+ switch {
+ default:
+ return false
+ case len(s) == len(prefix):
+ return s == prefix
+ case len(s) > len(prefix):
+ if prefix != "" && prefix[len(prefix)-1] == '/' {
+ return strings.HasPrefix(s, prefix)
+ }
+ return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
+ }
+}
+
+// treeCanMatchPattern(pattern)(name) reports whether
+// name or children of name can possibly match pattern.
+// Pattern is the same limited glob accepted by matchPattern.
+func treeCanMatchPattern(pattern string) func(name string) bool {
+ wildCard := false
+ if i := strings.Index(pattern, "..."); i >= 0 {
+ wildCard = true
+ pattern = pattern[:i]
+ }
+ return func(name string) bool {
+ return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
+ wildCard && strings.HasPrefix(name, pattern)
+ }
+}
+
+// allPackages returns all the packages that can be found
+// under the $GOPATH directories and $GOROOT matching pattern.
+// The pattern is either "all" (all packages), "std" (standard packages)
+// or a path including "...".
+func allPackages(pattern string) []string {
+ pkgs := matchPackages(pattern)
+ if len(pkgs) == 0 {
+ fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
+ }
+ return pkgs
+}
+
+func matchPackages(pattern string) []string {
+ match := func(string) bool { return true }
+ treeCanMatch := func(string) bool { return true }
+ if pattern != "all" && pattern != "std" {
+ match = matchPattern(pattern)
+ treeCanMatch = treeCanMatchPattern(pattern)
+ }
+
+ have := map[string]bool{
+ "builtin": true, // ignore pseudo-package that exists only for documentation
+ }
+ if !buildContext.CgoEnabled {
+ have["runtime/cgo"] = true // ignore during walk
+ }
+ var pkgs []string
+
+ // Commands
+ cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator)
+ filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error {
+ if err != nil || !fi.IsDir() || path == cmd {
+ return nil
+ }
+ name := path[len(cmd):]
+ if !treeCanMatch(name) {
+ return filepath.SkipDir
+ }
+ // Commands are all in cmd/, not in subdirectories.
+ if strings.Contains(name, string(filepath.Separator)) {
+ return filepath.SkipDir
+ }
+
+ // We use, e.g., cmd/gofmt as the pseudo import path for gofmt.
+ name = "cmd/" + name
+ if have[name] {
+ return nil
+ }
+ have[name] = true
+ if !match(name) {
+ return nil
+ }
+ _, err = buildContext.ImportDir(path, 0)
+ if err != nil {
+ if _, noGo := err.(*build.NoGoError); !noGo {
+ log.Print(err)
+ }
+ return nil
+ }
+ pkgs = append(pkgs, name)
+ return nil
+ })
+
+ for _, src := range buildContext.SrcDirs() {
+ if (pattern == "std" || pattern == "cmd") && src != gorootSrc {
+ continue
+ }
+ src = filepath.Clean(src) + string(filepath.Separator)
+ root := src
+ if pattern == "cmd" {
+ root += "cmd" + string(filepath.Separator)
+ }
+ filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil || !fi.IsDir() || path == src {
+ return nil
+ }
+
+ // Avoid .foo, _foo, and testdata directory trees.
+ _, elem := filepath.Split(path)
+ if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ return filepath.SkipDir
+ }
+
+ name := filepath.ToSlash(path[len(src):])
+ if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") {
+ // The name "std" is only the standard library.
+ // If the name is cmd, it's the root of the command tree.
+ return filepath.SkipDir
+ }
+ if !treeCanMatch(name) {
+ return filepath.SkipDir
+ }
+ if have[name] {
+ return nil
+ }
+ have[name] = true
+ if !match(name) {
+ return nil
+ }
+ _, err = buildContext.ImportDir(path, 0)
+ if err != nil {
+ if _, noGo := err.(*build.NoGoError); noGo {
+ return nil
+ }
+ }
+ pkgs = append(pkgs, name)
+ return nil
+ })
+ }
+ return pkgs
+}
+
+// allPackagesInFS is like allPackages but is passed a pattern
+// beginning ./ or ../, meaning it should scan the tree rooted
+// at the given directory. There are ... in the pattern too.
+func allPackagesInFS(pattern string) []string {
+ pkgs := matchPackagesInFS(pattern)
+ if len(pkgs) == 0 {
+ fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
+ }
+ return pkgs
+}
+
+func matchPackagesInFS(pattern string) []string {
+ // Find directory to begin the scan.
+ // Could be smarter but this one optimization
+ // is enough for now, since ... is usually at the
+ // end of a path.
+ i := strings.Index(pattern, "...")
+ dir, _ := path.Split(pattern[:i])
+
+ // pattern begins with ./ or ../.
+ // path.Clean will discard the ./ but not the ../.
+ // We need to preserve the ./ for pattern matching
+ // and in the returned import paths.
+ prefix := ""
+ if strings.HasPrefix(pattern, "./") {
+ prefix = "./"
+ }
+ match := matchPattern(pattern)
+
+ var pkgs []string
+ filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
+ if err != nil || !fi.IsDir() {
+ return nil
+ }
+ if path == dir {
+ // filepath.Walk starts at dir and recurses. For the recursive case,
+ // the path is the result of filepath.Join, which calls filepath.Clean.
+ // The initial case is not Cleaned, though, so we do this explicitly.
+ //
+ // This converts a path like "./io/" to "io". Without this step, running
+ // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io
+ // package, because prepending the prefix "./" to the unclean path would
+ // result in "././io", and match("././io") returns false.
+ path = filepath.Clean(path)
+ }
+
+ // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
+ _, elem := filepath.Split(path)
+ dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
+ if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
+ return filepath.SkipDir
+ }
+
+ name := prefix + filepath.ToSlash(path)
+ if !match(name) {
+ return nil
+ }
+ if _, err = build.ImportDir(path, 0); err != nil {
+ if _, noGo := err.(*build.NoGoError); !noGo {
+ log.Print(err)
+ }
+ return nil
+ }
+ pkgs = append(pkgs, name)
+ return nil
+ })
+ return pkgs
+}
diff --git a/vendor/github.com/golang/lint/lint.go b/vendor/github.com/golang/lint/lint.go
new file mode 100644
index 0000000..8bb1faa
--- /dev/null
+++ b/vendor/github.com/golang/lint/lint.go
@@ -0,0 +1,1697 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package lint contains a linter for Go source code.
+package lint
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/tools/go/gcexportdata"
+)
+
// styleGuideBase is the root URL of the Go code review style guide;
// per-rule links are built by appending anchors to it.
const styleGuideBase = "https://golang.org/wiki/CodeReviewComments"
+
// A Linter lints Go source code.
type Linter struct {
}

// Problem represents a problem in some source code.
type Problem struct {
	Position   token.Position // position in source file
	Text       string         // the prose that describes the problem
	Link       string         // (optional) the link to the style guide for the problem
	Confidence float64        // a value in (0,1] estimating the confidence in this problem's correctness
	LineText   string         // the source line
	Category   string         // a short name for the general category of the problem

	// If the problem has a suggested fix (the minority case),
	// ReplacementLine is a full replacement for the relevant line of the source file.
	ReplacementLine string
}

// String returns the problem text, followed by the style-guide link
// when one is set.
func (p *Problem) String() string {
	if p.Link != "" {
		return p.Text + "\n\n" + p.Link
	}
	return p.Text
}
+
+type byPosition []Problem
+
+func (p byPosition) Len() int { return len(p) }
+func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p byPosition) Less(i, j int) bool {
+ pi, pj := p[i].Position, p[j].Position
+
+ if pi.Filename != pj.Filename {
+ return pi.Filename < pj.Filename
+ }
+ if pi.Line != pj.Line {
+ return pi.Line < pj.Line
+ }
+ if pi.Column != pj.Column {
+ return pi.Column < pj.Column
+ }
+
+ return p[i].Text < p[j].Text
+}
+
+// Lint lints src.
+func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
+ return l.LintFiles(map[string][]byte{filename: src})
+}
+
+// LintFiles lints a set of files of a single package.
+// The argument is a map of filename to source.
+func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
+ pkg := &pkg{
+ fset: token.NewFileSet(),
+ files: make(map[string]*file),
+ }
+ var pkgName string
+ for filename, src := range files {
+ if isGenerated(src) {
+ continue // See issue #239
+ }
+ f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ if pkgName == "" {
+ pkgName = f.Name.Name
+ } else if f.Name.Name != pkgName {
+ return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName)
+ }
+ pkg.files[filename] = &file{
+ pkg: pkg,
+ f: f,
+ fset: pkg.fset,
+ src: src,
+ filename: filename,
+ }
+ }
+ if len(pkg.files) == 0 {
+ return nil, nil
+ }
+ return pkg.lint(), nil
+}
+
var (
	genHdr = []byte("// Code generated ")
	genFtr = []byte(" DO NOT EDIT.")
)

// isGenerated reports whether the source file is generated code
// according the rules from https://golang.org/s/generatedcode:
// some line must start with genHdr and end with genFtr, with the
// two markers not overlapping.
func isGenerated(src []byte) bool {
	for sc := bufio.NewScanner(bytes.NewReader(src)); sc.Scan(); {
		line := sc.Bytes()
		if len(line) < len(genHdr)+len(genFtr) {
			continue
		}
		if bytes.HasPrefix(line, genHdr) && bytes.HasSuffix(line, genFtr) {
			return true
		}
	}
	return false
}
+
// pkg represents a package being linted.
type pkg struct {
	fset  *token.FileSet         // positions for all files in the package
	files map[string]*file       // parsed files, keyed by filename

	typesPkg  *types.Package // result of type-checking; may be partial
	typesInfo *types.Info    // type information; may be partial

	// sortable is the set of types in the package that implement sort.Interface.
	sortable map[string]bool
	// main is whether this is a "main" package.
	main bool

	problems []Problem
}

// lint runs all checks over the package's files and returns the
// accumulated problems sorted by position. Type-check errors are
// currently ignored (see the commented-out block below); linting
// proceeds with whatever partial type information is available.
func (p *pkg) lint() []Problem {
	if err := p.typeCheck(); err != nil {
		/* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages.
		if e, ok := err.(types.Error); ok {
			pos := p.fset.Position(e.Pos)
			conf := 1.0
			if strings.Contains(e.Msg, "can't find import: ") {
				// Golint is probably being run in a context that doesn't support
				// typechecking (e.g. package files aren't found), so don't warn about it.
				conf = 0
			}
			if conf > 0 {
				p.errorfAt(pos, conf, category("typechecking"), e.Msg)
			}

			// TODO(dsymonds): Abort if !e.Soft?
		}
		*/
	}

	// These must run before the per-file checks: lintFuncDoc consults
	// p.sortable, and several checks consult p.main.
	p.scanSortable()
	p.main = p.isMain()

	for _, f := range p.files {
		f.lint()
	}

	sort.Sort(byPosition(p.problems))

	return p.problems
}

// file represents a file being linted.
type file struct {
	pkg      *pkg
	f        *ast.File
	fset     *token.FileSet
	src      []byte
	filename string
}

// isTest reports whether the file is a _test.go file.
func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") }

// lint runs every file-level check in a fixed order.
func (f *file) lint() {
	f.lintPackageComment()
	f.lintImports()
	f.lintBlankImports()
	f.lintExported()
	f.lintNames()
	f.lintVarDecls()
	f.lintElses()
	f.lintIfError()
	f.lintRanges()
	f.lintErrorf()
	f.lintErrors()
	f.lintErrorStrings()
	f.lintReceiverNames()
	f.lintIncDec()
	f.lintErrorReturn()
	f.lintUnexportedReturn()
	f.lintTimeNames()
	f.lintContextKeyTypes()
	f.lintContextArgs()
}
+
// link and category are marker types used as leading variadic
// arguments to errorf/errorfAt to attach a style-guide link and a
// problem category to the reported Problem.
type link string
type category string

// errorf records a Problem at node n with the given confidence.
// The variadic arguments may start with link and category types,
// and must end with a format string and any arguments.
// It returns the new Problem.
func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem {
	pos := f.fset.Position(n.Pos())
	if pos.Filename == "" {
		pos.Filename = f.filename
	}
	return f.pkg.errorfAt(pos, confidence, args...)
}

// errorfAt records a Problem at an explicit position. Leading link
// and category arguments are peeled off before the remaining
// arguments are treated as a Sprintf format string plus operands.
//
// NOTE(review): the returned pointer aliases p.problems' backing
// array; a later append may reallocate and orphan it. Callers appear
// to use the pointer only immediately (to set ReplacementLine) —
// confirm before retaining it across calls.
func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem {
	problem := Problem{
		Position:   pos,
		Confidence: confidence,
	}
	if pos.Filename != "" {
		// The file might not exist in our mapping if a //line directive was encountered.
		if f, ok := p.files[pos.Filename]; ok {
			problem.LineText = srcLine(f.src, pos)
		}
	}

argLoop:
	for len(args) > 1 { // always leave at least the format string in args
		switch v := args[0].(type) {
		case link:
			problem.Link = string(v)
		case category:
			problem.Category = string(v)
		default:
			break argLoop
		}
		args = args[1:]
	}

	problem.Text = fmt.Sprintf(args[0].(string), args[1:]...)

	p.problems = append(p.problems, problem)
	return &p.problems[len(p.problems)-1]
}
+
// newImporter constructs the importer used for type-checking.
// It is a variable so tests can substitute a different importer.
var newImporter = func(fset *token.FileSet) types.ImporterFrom {
	return gcexportdata.NewImporter(fset, make(map[string]*types.Package))
}

// typeCheck type-checks the package's files. The resulting (possibly
// partial) package and info are stored on p even when type-checking
// fails, so later checks can use whatever information was gathered.
func (p *pkg) typeCheck() error {
	config := &types.Config{
		// By setting a no-op error reporter, the type checker does as much work as possible.
		Error:    func(error) {},
		Importer: newImporter(p.fset),
	}
	info := &types.Info{
		Types:  make(map[ast.Expr]types.TypeAndValue),
		Defs:   make(map[*ast.Ident]types.Object),
		Uses:   make(map[*ast.Ident]types.Object),
		Scopes: make(map[ast.Node]*types.Scope),
	}
	// Any file supplies the package name; LintFiles has already
	// verified that all files agree on it.
	var anyFile *file
	var astFiles []*ast.File
	for _, f := range p.files {
		anyFile = f
		astFiles = append(astFiles, f.f)
	}
	pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info)
	// Remember the typechecking info, even if config.Check failed,
	// since we will get partial information.
	p.typesPkg = pkg
	p.typesInfo = info
	return err
}
+
+func (p *pkg) typeOf(expr ast.Expr) types.Type {
+ if p.typesInfo == nil {
+ return nil
+ }
+ return p.typesInfo.TypeOf(expr)
+}
+
+func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool {
+ n, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ tn := n.Obj()
+ return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name
+}
+
// scopeOf returns the tightest scope encompassing id.
func (p *pkg) scopeOf(id *ast.Ident) *types.Scope {
	var scope *types.Scope
	if obj := p.typesInfo.ObjectOf(id); obj != nil {
		scope = obj.Parent()
	}
	if scope == p.typesPkg.Scope() {
		// We were given a top-level identifier.
		// Use the file-level scope instead of the package-level scope.
		pos := id.Pos()
		for _, f := range p.files {
			if f.f.Pos() <= pos && pos < f.f.End() {
				scope = p.typesInfo.Scopes[f.f]
				break
			}
		}
	}
	return scope
}

// scanSortable records, in p.sortable, every receiver type that
// declares all three of the sort.Interface methods (Len, Less, Swap).
func (p *pkg) scanSortable() {
	p.sortable = make(map[string]bool)

	// bitfield for which methods exist on each type.
	const (
		Len = 1 << iota
		Less
		Swap
	)
	nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
	has := make(map[string]int)
	for _, f := range p.files {
		f.walk(func(n ast.Node) bool {
			fn, ok := n.(*ast.FuncDecl)
			if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
				return true
			}
			// TODO(dsymonds): We could check the signature to be more precise.
			recv := receiverType(fn)
			if i, ok := nmap[fn.Name.Name]; ok {
				has[recv] |= i
			}
			return false
		})
	}
	// A type is sortable only when all three bits are present.
	for typ, ms := range has {
		if ms == Len|Less|Swap {
			p.sortable[typ] = true
		}
	}
}
+
+func (p *pkg) isMain() bool {
+ for _, f := range p.files {
+ if f.isMain() {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *file) isMain() bool {
+ if f.f.Name.Name == "main" {
+ return true
+ }
+ return false
+}
+
// lintPackageComment checks package comments. It complains if
// there is no package comment, or if it is not of the right form.
// This has a notable false positive in that a package comment
// could rightfully appear in a different file of the same package,
// but that's not easy to fix since this linter is file-oriented.
func (f *file) lintPackageComment() {
	if f.isTest() {
		return
	}

	const ref = styleGuideBase + "#package-comments"
	prefix := "Package " + f.f.Name.Name + " "

	// Look for a detached package comment.
	// First, scan for the last comment that occurs before the "package" keyword.
	var lastCG *ast.CommentGroup
	for _, cg := range f.f.Comments {
		if cg.Pos() > f.f.Package {
			// Gone past "package" keyword.
			break
		}
		lastCG = cg
	}
	if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) {
		endPos := f.fset.Position(lastCG.End())
		pkgPos := f.fset.Position(f.f.Package)
		if endPos.Line+1 < pkgPos.Line {
			// There isn't a great place to anchor this error;
			// the start of the blank lines between the doc and the package statement
			// is at least pointing at the location of the problem.
			pos := token.Position{
				Filename: endPos.Filename,
				// Offset not set; it is non-trivial, and doesn't appear to be needed.
				Line:   endPos.Line + 1,
				Column: 1,
			}
			f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement")
			return
		}
	}

	if f.f.Doc == nil {
		f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package")
		return
	}
	s := f.f.Doc.Text()
	if ts := strings.TrimLeft(s, " \t"); ts != s {
		f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space")
		s = ts
	}
	// Only non-main packages need to keep to this form.
	if !f.pkg.main && !strings.HasPrefix(s, prefix) {
		f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix)
	}
}

// lintBlankImports complains if a non-main package has blank imports that are
// not documented.
func (f *file) lintBlankImports() {
	// In package main and in tests, we don't complain about blank imports.
	if f.pkg.main || f.isTest() {
		return
	}

	// The first element of each contiguous group of blank imports should have
	// an explanatory comment of some kind.
	for i, imp := range f.f.Imports {
		pos := f.fset.Position(imp.Pos())

		if !isBlank(imp.Name) {
			continue // Ignore non-blank imports.
		}
		if i > 0 {
			prev := f.f.Imports[i-1]
			prevPos := f.fset.Position(prev.Pos())
			if isBlank(prev.Name) && prevPos.Line+1 == pos.Line {
				continue // A subsequent blank in a group.
			}
		}

		// This is the first blank import of a group.
		if imp.Doc == nil && imp.Comment == nil {
			// NOTE(review): ref is empty, so this problem carries no
			// style-guide link; presumably intentional — confirm.
			ref := ""
			f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it")
		}
	}
}
+
+// lintImports examines import blocks.
+func (f *file) lintImports() {
+ for i, is := range f.f.Imports {
+ _ = i
+ if is.Name != nil && is.Name.Name == "." && !f.isTest() {
+ f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports")
+ }
+
+ }
+}
+
+const docCommentsLink = styleGuideBase + "#doc-comments"
+
// lintExported examines the exported names.
// It complains if any required doc comments are missing,
// or if they are not of the right form. The exact rules are in
// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function
// also tracks the GenDecl structure being traversed to permit
// doc comments for constants to be on top of the const block.
// It also complains if the names stutter when combined with
// the package name.
func (f *file) lintExported() {
	if f.isTest() {
		return
	}

	var lastGen *ast.GenDecl // last GenDecl entered.

	// Set of GenDecls that have already had missing comments flagged.
	genDeclMissingComments := make(map[*ast.GenDecl]bool)

	f.walk(func(node ast.Node) bool {
		switch v := node.(type) {
		case *ast.GenDecl:
			if v.Tok == token.IMPORT {
				return false
			}
			// token.CONST, token.TYPE or token.VAR
			lastGen = v
			return true
		case *ast.FuncDecl:
			f.lintFuncDoc(v)
			if v.Recv == nil {
				// Only check for stutter on functions, not methods.
				// Method names are not used package-qualified.
				f.checkStutter(v.Name, "func")
			}
			// Don't proceed inside funcs.
			return false
		case *ast.TypeSpec:
			// inside a GenDecl, which usually has the doc
			doc := v.Doc
			if doc == nil {
				doc = lastGen.Doc
			}
			f.lintTypeDoc(v, doc)
			f.checkStutter(v.Name, "type")
			// Don't proceed inside types.
			return false
		case *ast.ValueSpec:
			f.lintValueSpecDoc(v, lastGen, genDeclMissingComments)
			return false
		}
		return true
	})
}

// allCapsRE matches names written entirely in upper case, digits,
// and underscores (C-style constants).
var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)

// knownNameExceptions is a set of names that are known to be exempt from naming checks.
// This is usually because they are constrained by having to match names in the
// standard library.
var knownNameExceptions = map[string]bool{
	"LastInsertId": true, // must match database/sql
	"kWh":          true,
}
+
// lintNames examines all names in the file.
// It complains if any use underscores or incorrect known initialisms.
func (f *file) lintNames() {
	// Package names need slightly different handling than other names.
	if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") {
		f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name")
	}

	// check reports a naming problem for a single identifier; thing
	// describes its kind ("var", "struct field", ...).
	check := func(id *ast.Ident, thing string) {
		if id.Name == "_" {
			return
		}
		if knownNameExceptions[id.Name] {
			return
		}

		// Handle two common styles from other languages that don't belong in Go.
		if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") {
			f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase")
			return
		}
		if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
			should := string(id.Name[1]+'a'-'A') + id.Name[2:]
			f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
		}

		should := lintName(id.Name)
		if id.Name == should {
			return
		}

		if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") {
			f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should)
			return
		}
		f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should)
	}
	// checkList applies check to every named field in a field list
	// (parameters or results); fl may be nil.
	checkList := func(fl *ast.FieldList, thing string) {
		if fl == nil {
			return
		}
		for _, f := range fl.List {
			for _, id := range f.Names {
				check(id, thing)
			}
		}
	}
	f.walk(func(node ast.Node) bool {
		switch v := node.(type) {
		case *ast.AssignStmt:
			// Only := introduces new names worth checking.
			if v.Tok == token.ASSIGN {
				return true
			}
			for _, exp := range v.Lhs {
				if id, ok := exp.(*ast.Ident); ok {
					check(id, "var")
				}
			}
		case *ast.FuncDecl:
			// Test entry points are named by convention; skip them.
			if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
				return true
			}

			thing := "func"
			if v.Recv != nil {
				thing = "method"
			}

			// Exclude naming warnings for functions that are exported to C but
			// not exported in the Go API.
			// See https://github.com/golang/lint/issues/144.
			if ast.IsExported(v.Name.Name) || !isCgoExported(v) {
				check(v.Name, thing)
			}

			checkList(v.Type.Params, thing+" parameter")
			checkList(v.Type.Results, thing+" result")
		case *ast.GenDecl:
			if v.Tok == token.IMPORT {
				return true
			}
			var thing string
			switch v.Tok {
			case token.CONST:
				thing = "const"
			case token.TYPE:
				thing = "type"
			case token.VAR:
				thing = "var"
			}
			for _, spec := range v.Specs {
				switch s := spec.(type) {
				case *ast.TypeSpec:
					check(s.Name, thing)
				case *ast.ValueSpec:
					for _, id := range s.Names {
						check(id, thing)
					}
				}
			}
		case *ast.InterfaceType:
			// Do not check interface method names.
			// They are often constrained by the method names of concrete types.
			for _, x := range v.Methods.List {
				ft, ok := x.Type.(*ast.FuncType)
				if !ok { // might be an embedded interface name
					continue
				}
				checkList(ft.Params, "interface method parameter")
				checkList(ft.Results, "interface method result")
			}
		case *ast.RangeStmt:
			// Only := introduces new names worth checking.
			if v.Tok == token.ASSIGN {
				return true
			}
			if id, ok := v.Key.(*ast.Ident); ok {
				check(id, "range var")
			}
			if id, ok := v.Value.(*ast.Ident); ok {
				check(id, "range var")
			}
		case *ast.StructType:
			for _, f := range v.Fields.List {
				for _, id := range f.Names {
					check(id, "struct field")
				}
			}
		}
		return true
	})
}
+
// lintName returns a different name if it should be different.
// It splits name into words at underscores and lower-to-upper
// transitions, upper-cases any word that is a common initialism,
// and removes the underscores (keeping one between two digits).
// The rune slice is edited in place as words are processed.
func lintName(name string) (should string) {
	// Fast path for simple cases: "_" and all lowercase.
	if name == "_" {
		return name
	}
	allLower := true
	for _, r := range name {
		if !unicode.IsLower(r) {
			allLower = false
			break
		}
	}
	if allLower {
		return name
	}

	// Split camelCase at any lower->upper transition, and split on underscores.
	// Check each word for common initialisms.
	runes := []rune(name)
	w, i := 0, 0 // index of start of word, scan
	for i+1 <= len(runes) {
		eow := false // whether we hit the end of a word
		if i+1 == len(runes) {
			eow = true
		} else if runes[i+1] == '_' {
			// underscore; shift the remainder forward over any run of underscores
			eow = true
			n := 1
			for i+n+1 < len(runes) && runes[i+n+1] == '_' {
				n++
			}

			// Leave at most one underscore if the underscore is between two digits
			if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
				n--
			}

			copy(runes[i+1:], runes[i+n+1:])
			runes = runes[:len(runes)-n]
		} else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
			// lower->non-lower
			eow = true
		}
		i++
		if !eow {
			continue
		}

		// [w,i) is a word.
		word := string(runes[w:i])
		if u := strings.ToUpper(word); commonInitialisms[u] {
			// Keep consistent case, which is lowercase only at the start.
			if w == 0 && unicode.IsLower(runes[w]) {
				u = strings.ToLower(u)
			}
			// All the common initialisms are ASCII,
			// so we can replace the bytes exactly.
			copy(runes[w:], []rune(u))
		} else if w > 0 && strings.ToLower(word) == word {
			// already all lowercase, and not the first word, so uppercase the first character.
			runes[w] = unicode.ToUpper(runes[w])
		}
		w = i
	}
	return string(runes)
}

// commonInitialisms is a set of common initialisms.
// Only add entries that are highly unlikely to be non-initialisms.
// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
var commonInitialisms = map[string]bool{
	"ACL":   true,
	"API":   true,
	"ASCII": true,
	"CPU":   true,
	"CSS":   true,
	"DNS":   true,
	"EOF":   true,
	"GUID":  true,
	"HTML":  true,
	"HTTP":  true,
	"HTTPS": true,
	"ID":    true,
	"IP":    true,
	"JSON":  true,
	"LHS":   true,
	"QPS":   true,
	"RAM":   true,
	"RHS":   true,
	"RPC":   true,
	"SLA":   true,
	"SMTP":  true,
	"SQL":   true,
	"SSH":   true,
	"TCP":   true,
	"TLS":   true,
	"TTL":   true,
	"UDP":   true,
	"UI":    true,
	"UID":   true,
	"UUID":  true,
	"URI":   true,
	"URL":   true,
	"UTF8":  true,
	"VM":    true,
	"XML":   true,
	"XMPP":  true,
	"XSRF":  true,
	"XSS":   true,
}
+
+// lintTypeDoc examines the doc comment on a type.
+// It complains if they are missing from an exported type,
+// or if they are not of the standard form.
+func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) {
+ if !ast.IsExported(t.Name.Name) {
+ return
+ }
+ if doc == nil {
+ f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name)
+ return
+ }
+
+ s := doc.Text()
+ articles := [...]string{"A", "An", "The"}
+ for _, a := range articles {
+ if strings.HasPrefix(s, a+" ") {
+ s = s[len(a)+1:]
+ break
+ }
+ }
+ if !strings.HasPrefix(s, t.Name.Name+" ") {
+ f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name)
+ }
+}
+
// commonMethods are well-known interface methods whose meaning is
// fixed by the standard library, so they are exempt from doc checks.
var commonMethods = map[string]bool{
	"Error":     true,
	"Read":      true,
	"ServeHTTP": true,
	"String":    true,
	"Write":     true,
}

// lintFuncDoc examines doc comments on functions and methods.
// It complains if they are missing, or not of the right form.
// It has specific exclusions for well-known methods (see commonMethods above).
func (f *file) lintFuncDoc(fn *ast.FuncDecl) {
	if !ast.IsExported(fn.Name.Name) {
		// func is unexported
		return
	}
	kind := "function"
	name := fn.Name.Name
	if fn.Recv != nil && len(fn.Recv.List) > 0 {
		// method
		kind = "method"
		recv := receiverType(fn)
		if !ast.IsExported(recv) {
			// receiver is unexported
			return
		}
		if commonMethods[name] {
			return
		}
		// sort.Interface methods on sortable types are self-explanatory.
		switch name {
		case "Len", "Less", "Swap":
			if f.pkg.sortable[recv] {
				return
			}
		}
		name = recv + "." + name
	}
	if fn.Doc == nil {
		f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name)
		return
	}
	s := fn.Doc.Text()
	prefix := fn.Name.Name + " "
	if !strings.HasPrefix(s, prefix) {
		f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
	}
}

// lintValueSpecDoc examines package-global variables and constants.
// It complains if they are not individually declared,
// or if they are not suitably documented in the right form (unless they are in a block that is commented).
func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) {
	kind := "var"
	if gd.Tok == token.CONST {
		kind = "const"
	}

	if len(vs.Names) > 1 {
		// Check that none are exported except for the first.
		for _, n := range vs.Names[1:] {
			if ast.IsExported(n.Name) {
				f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name)
				return
			}
		}
	}

	// Only one name.
	name := vs.Names[0].Name
	if !ast.IsExported(name) {
		return
	}

	if vs.Doc == nil && gd.Doc == nil {
		// Flag a missing comment only once per GenDecl.
		if genDeclMissingComments[gd] {
			return
		}
		block := ""
		if kind == "const" && gd.Lparen.IsValid() {
			block = " (or a comment on this block)"
		}
		f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block)
		genDeclMissingComments[gd] = true
		return
	}
	// If this GenDecl has parens and a comment, we don't check its comment form.
	if gd.Lparen.IsValid() && gd.Doc != nil {
		return
	}
	// The relevant text to check will be on either vs.Doc or gd.Doc.
	// Use vs.Doc preferentially.
	doc := vs.Doc
	if doc == nil {
		doc = gd.Doc
	}
	prefix := name + " "
	if !strings.HasPrefix(doc.Text(), prefix) {
		f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
	}
}
+
// checkStutter complains when an exported name repeats the package
// name as a prefix (e.g. strings.StringsReader); thing describes the
// kind of declaration ("func", "type").
func (f *file) checkStutter(id *ast.Ident, thing string) {
	pkg, name := f.f.Name.Name, id.Name
	if !ast.IsExported(name) {
		// unexported name
		return
	}
	// A name stutters if the package name is a strict prefix
	// and the next character of the name starts a new word.
	if len(name) <= len(pkg) {
		// name is too short to stutter.
		// This permits the name to be the same as the package name.
		return
	}
	if !strings.EqualFold(pkg, name[:len(pkg)]) {
		return
	}
	// We can assume the name is well-formed UTF-8.
	// If the next rune after the package name is uppercase or an underscore
	// then it's starting a new word and thus this name stutters.
	rem := name[len(pkg):]
	if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) {
		f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem)
	}
}

// zeroLiteral is a set of ast.BasicLit values that are zero values.
// It is not exhaustive.
var zeroLiteral = map[string]bool{
	"false": true, // bool
	// runes
	`'\x00'`: true,
	`'\000'`: true,
	// strings
	`""`: true,
	"``": true,
	// numerics
	"0":   true,
	"0.":  true,
	"0.0": true,
	"0i":  true,
}
+
// lintVarDecls examines variable declarations. It complains about declarations with
// redundant LHS types that can be inferred from the RHS.
func (f *file) lintVarDecls() {
	var lastGen *ast.GenDecl // last GenDecl entered.

	f.walk(func(node ast.Node) bool {
		switch v := node.(type) {
		case *ast.GenDecl:
			if v.Tok != token.CONST && v.Tok != token.VAR {
				return false
			}
			lastGen = v
			return true
		case *ast.ValueSpec:
			// Constants often need their explicit type; skip them.
			if lastGen.Tok == token.CONST {
				return false
			}
			// Only consider "var x T = expr" with a single name.
			if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 {
				return false
			}
			rhs := v.Values[0]
			// An underscore var appears in a common idiom for compile-time interface satisfaction,
			// as in "var _ Interface = (*Concrete)(nil)".
			if isIdent(v.Names[0], "_") {
				return false
			}
			// If the RHS is a zero value, suggest dropping it.
			zero := false
			if lit, ok := rhs.(*ast.BasicLit); ok {
				zero = zeroLiteral[lit.Value]
			} else if isIdent(rhs, "nil") {
				zero = true
			}
			if zero {
				f.errorf(rhs, 0.9, category("zero-value"), "should drop = %s from declaration of var %s; it is the zero value", f.render(rhs), v.Names[0])
				return false
			}
			lhsTyp := f.pkg.typeOf(v.Type)
			rhsTyp := f.pkg.typeOf(rhs)

			if !validType(lhsTyp) || !validType(rhsTyp) {
				// Type checking failed (often due to missing imports).
				return false
			}

			if !types.Identical(lhsTyp, rhsTyp) {
				// Assignment to a different type is not redundant.
				return false
			}

			// The next three conditions are for suppressing the warning in situations
			// where we were unable to typecheck.

			// If the LHS type is an interface, don't warn, since it is probably a
			// concrete type on the RHS. Note that our feeble lexical check here
			// will only pick up interface{} and other literal interface types;
			// that covers most of the cases we care to exclude right now.
			if _, ok := v.Type.(*ast.InterfaceType); ok {
				return false
			}
			// If the RHS is an untyped const, only warn if the LHS type is its default type.
			if defType, ok := f.isUntypedConst(rhs); ok && !isIdent(v.Type, defType) {
				return false
			}

			f.errorf(v.Type, 0.8, category("type-inference"), "should omit type %s from declaration of var %s; it will be inferred from the right-hand side", f.render(v.Type), v.Names[0])
			return false
		}
		return true
	})
}
+
// validType reports whether T is a usable, successfully type-checked
// type: non-nil, not the invalid type, and not containing an invalid
// component (textual check; good but not foolproof).
func validType(T types.Type) bool {
	if T == nil || T == types.Typ[types.Invalid] {
		return false
	}
	return !strings.Contains(T.String(), "invalid type")
}
+
// lintElses examines else blocks. It complains about any else block whose if block ends in a return.
func (f *file) lintElses() {
	// We don't want to flag if { } else if { } else { } constructions.
	// They will appear as an IfStmt whose Else field is also an IfStmt.
	// Record such a node so we ignore it when we visit it.
	ignore := make(map[*ast.IfStmt]bool)

	f.walk(func(node ast.Node) bool {
		ifStmt, ok := node.(*ast.IfStmt)
		if !ok || ifStmt.Else == nil {
			return true
		}
		if ignore[ifStmt] {
			return true
		}
		if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok {
			ignore[elseif] = true
			return true
		}
		if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok {
			// only care about elses without conditions
			return true
		}
		if len(ifStmt.Body.List) == 0 {
			return true
		}
		shortDecl := false // does the if statement have a ":=" initialization statement?
		if ifStmt.Init != nil {
			if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE {
				shortDecl = true
			}
		}
		// Flag only when the if body's final statement is a return.
		lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1]
		if _, ok := lastStmt.(*ast.ReturnStmt); ok {
			extra := ""
			if shortDecl {
				extra = " (move short variable declaration to its own line if necessary)"
			}
			f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra)
		}
		return true
	})
}

// lintRanges examines range clauses. It complains about redundant constructions.
func (f *file) lintRanges() {
	f.walk(func(node ast.Node) bool {
		rs, ok := node.(*ast.RangeStmt)
		if !ok {
			return true
		}
		if rs.Value == nil {
			// for x = range m { ... }
			return true // single var form
		}
		if !isIdent(rs.Value, "_") {
			// for ?, y = range m { ... }
			return true
		}

		p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)

		// Suggest the line with the blank value removed.
		newRS := *rs // shallow copy
		newRS.Value = nil
		p.ReplacementLine = f.firstLineOf(&newRS, rs)

		return true
	})
}

// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation.
func (f *file) lintErrorf() {
	f.walk(func(node ast.Node) bool {
		ce, ok := node.(*ast.CallExpr)
		if !ok || len(ce.Args) != 1 {
			return true
		}
		isErrorsNew := isPkgDot(ce.Fun, "errors", "New")
		var isTestingError bool
		se, ok := ce.Fun.(*ast.SelectorExpr)
		if ok && se.Sel.Name == "Error" {
			if typ := f.pkg.typeOf(se.X); typ != nil {
				isTestingError = typ.String() == "*testing.T"
			}
		}
		if !isErrorsNew && !isTestingError {
			return true
		}
		// The single argument must itself be a fmt.Sprintf call.
		arg := ce.Args[0]
		ce, ok = arg.(*ast.CallExpr)
		if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") {
			return true
		}
		errorfPrefix := "fmt"
		if isTestingError {
			errorfPrefix = f.render(se.X)
		}
		p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix)

		// Build a suggested replacement line via a regexp match on the source.
		m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`)
		if m != nil {
			p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3]
		}

		return true
	})
}
+
+// lintErrors examines global error vars. It complains if they aren't named in the standard way.
+func (f *file) lintErrors() {
+	for _, decl := range f.f.Decls {
+		gd, ok := decl.(*ast.GenDecl)
+		if !ok || gd.Tok != token.VAR {
+			continue
+		}
+		for _, spec := range gd.Specs {
+			spec := spec.(*ast.ValueSpec)
+			// Only consider the simple form: var name = value.
+			if len(spec.Names) != 1 || len(spec.Values) != 1 {
+				continue
+			}
+			ce, ok := spec.Values[0].(*ast.CallExpr)
+			if !ok {
+				continue
+			}
+			if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+				continue
+			}
+
+			// Exported error vars should start with Err, unexported with err.
+			id := spec.Names[0]
+			prefix := "err"
+			if id.IsExported() {
+				prefix = "Err"
+			}
+			if !strings.HasPrefix(id.Name, prefix) {
+				f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix)
+			}
+		}
+	}
+}
+
+// lintErrorString reports whether the error message s follows the style
+// guide (not capitalized, no trailing punctuation or newline), and the
+// confidence to attach to a warning when it does not.
+func lintErrorString(s string) (isClean bool, conf float64) {
+	const basicConfidence = 0.8
+	const capConfidence = basicConfidence - 0.2
+	first, firstN := utf8.DecodeRuneInString(s)
+	last, _ := utf8.DecodeLastRuneInString(s)
+	if last == '.' || last == ':' || last == '!' || last == '\n' {
+		return false, basicConfidence
+	}
+	if unicode.IsUpper(first) {
+		// People use proper nouns and exported Go identifiers in error strings,
+		// so decrease the confidence of warnings for capitalization.
+		if len(s) <= firstN {
+			return false, capConfidence
+		}
+		// Flag strings starting with something that doesn't look like an initialism:
+		// two leading upper-case runes (e.g. "EOF...") are assumed intentional.
+		if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) {
+			return false, capConfidence
+		}
+	}
+	return true, 0
+}
+
+// lintErrorStrings examines error strings.
+// It complains if they are capitalized or end in punctuation or a newline.
+func (f *file) lintErrorStrings() {
+	f.walk(func(node ast.Node) bool {
+		ce, ok := node.(*ast.CallExpr)
+		if !ok {
+			return true
+		}
+		if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+			return true
+		}
+		if len(ce.Args) < 1 {
+			return true
+		}
+		// Only string literals can be checked; computed messages are skipped.
+		str, ok := ce.Args[0].(*ast.BasicLit)
+		if !ok || str.Kind != token.STRING {
+			return true
+		}
+		s, _ := strconv.Unquote(str.Value) // can assume well-formed Go
+		if s == "" {
+			return true
+		}
+		clean, conf := lintErrorString(s)
+		if clean {
+			return true
+		}
+
+		f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"),
+			"error strings should not be capitalized or end with punctuation or a newline")
+		return true
+	})
+}
+
+// lintReceiverNames examines receiver names. It complains about inconsistent
+// names used for the same type and names such as "this".
+func (f *file) lintReceiverNames() {
+	// typeReceiver remembers the first receiver name seen for each type,
+	// so later methods can be checked for consistency against it.
+	typeReceiver := map[string]string{}
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
+			return true
+		}
+		names := fn.Recv.List[0].Names
+		if len(names) < 1 {
+			return true
+		}
+		name := names[0].Name
+		const ref = styleGuideBase + "#receiver-names"
+		if name == "_" {
+			f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`)
+			return true
+		}
+		if name == "this" || name == "self" {
+			f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`)
+			return true
+		}
+		recv := receiverType(fn)
+		if prev, ok := typeReceiver[recv]; ok && prev != name {
+			f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv)
+			return true
+		}
+		typeReceiver[recv] = name
+		return true
+	})
+}
+
+// lintIncDec examines statements that increment or decrement a variable.
+// It complains if they don't use x++ or x--.
+func (f *file) lintIncDec() {
+	f.walk(func(n ast.Node) bool {
+		as, ok := n.(*ast.AssignStmt)
+		if !ok {
+			return true
+		}
+		if len(as.Lhs) != 1 {
+			return true
+		}
+		// Only x += 1 and x -= 1 (literal 1) have ++/-- equivalents.
+		if !isOne(as.Rhs[0]) {
+			return true
+		}
+		var suffix string
+		switch as.Tok {
+		case token.ADD_ASSIGN:
+			suffix = "++"
+		case token.SUB_ASSIGN:
+			suffix = "--"
+		default:
+			return true
+		}
+		f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix)
+		return true
+	})
+}
+
+// lintErrorReturn examines function declarations that return an error.
+// It complains if the error isn't the last return value.
+func (f *file) lintErrorReturn() {
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok || fn.Type.Results == nil {
+			return true
+		}
+		ret := fn.Type.Results.List
+		if len(ret) <= 1 {
+			return true
+		}
+		// An error return parameter should be the last parameter.
+		// Flag any error parameters found before the last.
+		for _, r := range ret[:len(ret)-1] {
+			if isIdent(r.Type, "error") {
+				f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items")
+				break // only flag one
+			}
+		}
+		return true
+	})
+}
+
+// lintUnexportedReturn examines exported function declarations.
+// It complains if any return an unexported type.
+func (f *file) lintUnexportedReturn() {
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok {
+			return true
+		}
+		// Returning false stops the walk from descending into the
+		// function body; nested funcs need not be examined.
+		if fn.Type.Results == nil {
+			return false
+		}
+		if !fn.Name.IsExported() {
+			return false
+		}
+		thing := "func"
+		if fn.Recv != nil && len(fn.Recv.List) > 0 {
+			thing = "method"
+			if !ast.IsExported(receiverType(fn)) {
+				// Don't report exported methods of unexported types,
+				// such as private implementations of sort.Interface.
+				return false
+			}
+		}
+		for _, ret := range fn.Type.Results.List {
+			typ := f.pkg.typeOf(ret.Type)
+			if exportedType(typ) {
+				continue
+			}
+			f.errorf(ret.Type, 0.8, category("unexported-type-in-api"),
+				"exported %s %s returns unexported type %s, which can be annoying to use",
+				thing, fn.Name.Name, typ)
+			break // only flag one
+		}
+		return false
+	})
+}
+
+// exportedType reports whether typ is an exported type.
+// It is imprecise, and will err on the side of returning true,
+// such as for composite types.
+func exportedType(typ types.Type) bool {
+	switch T := typ.(type) {
+	case *types.Named:
+		// Builtin types have no package.
+		return T.Obj().Pkg() == nil || T.Obj().Exported()
+	case *types.Map:
+		// A map is usable only if both its key and element types are.
+		return exportedType(T.Key()) && exportedType(T.Elem())
+	case interface {
+		Elem() types.Type
+	}: // array, slice, pointer, chan
+		return exportedType(T.Elem())
+	}
+	// Be conservative about other types, such as struct, interface, etc.
+	return true
+}
+
+// timeSuffixes is a list of name suffixes that imply a time unit.
+// This is not an exhaustive list.
+// A time.Duration carries its own unit, so such suffixes are misleading.
+var timeSuffixes = []string{
+	"Sec", "Secs", "Seconds",
+	"Msec", "Msecs",
+	"Milli", "Millis", "Milliseconds",
+	"Usec", "Usecs", "Microseconds",
+	"MS", "Ms",
+}
+
+// lintTimeNames examines declared names of time.Duration values.
+// It complains if a name carries a unit-specific suffix (see timeSuffixes),
+// since a Duration already encodes its unit.
+func (f *file) lintTimeNames() {
+	f.walk(func(node ast.Node) bool {
+		v, ok := node.(*ast.ValueSpec)
+		if !ok {
+			return true
+		}
+		for _, name := range v.Names {
+			origTyp := f.pkg.typeOf(name)
+			// Look for time.Duration or *time.Duration;
+			// the latter is common when using flag.Duration.
+			typ := origTyp
+			if pt, ok := typ.(*types.Pointer); ok {
+				typ = pt.Elem()
+			}
+			if !f.pkg.isNamedType(typ, "time", "Duration") {
+				continue
+			}
+			suffix := ""
+			for _, suf := range timeSuffixes {
+				if strings.HasSuffix(name.Name, suf) {
+					suffix = suf
+					break
+				}
+			}
+			if suffix == "" {
+				continue
+			}
+			f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix)
+		}
+		return true
+	})
+}
+
+// lintContextKeyTypes checks for call expressions to context.WithValue with
+// basic types used for the key argument.
+// See: https://golang.org/issue/17293
+func (f *file) lintContextKeyTypes() {
+	f.walk(func(node ast.Node) bool {
+		switch node := node.(type) {
+		case *ast.CallExpr:
+			// Per-call checking lives in checkContextKeyType.
+			f.checkContextKeyType(node)
+		}
+
+		return true
+	})
+}
+
+// checkContextKeyType reports an error if the call expression calls
+// context.WithValue with a key argument of basic type.
+// Basic-typed keys collide between packages; see https://golang.org/issue/17293.
+func (f *file) checkContextKeyType(x *ast.CallExpr) {
+	sel, ok := x.Fun.(*ast.SelectorExpr)
+	if !ok {
+		return
+	}
+	pkg, ok := sel.X.(*ast.Ident)
+	if !ok || pkg.Name != "context" {
+		return
+	}
+	if sel.Sel.Name != "WithValue" {
+		return
+	}
+
+	// key is second argument to context.WithValue
+	if len(x.Args) != 3 {
+		return
+	}
+	key := f.pkg.typesInfo.Types[x.Args[1]]
+
+	if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid {
+		// Pass the format string and argument to errorf directly rather
+		// than pre-rendering with fmt.Sprintf: errorf is Printf-style
+		// (every other call site in this file passes format + args), so a
+		// pre-rendered message containing a '%' would be re-interpreted
+		// as a format directive.
+		f.errorf(x, 1.0, category("context"), "should not use basic type %s as key in context.WithValue", key.Type)
+	}
+}
+
+// lintContextArgs examines function declarations that contain an
+// argument with a type of context.Context
+// It complains if that argument isn't the first parameter.
+func (f *file) lintContextArgs() {
+	f.walk(func(n ast.Node) bool {
+		fn, ok := n.(*ast.FuncDecl)
+		if !ok || len(fn.Type.Params.List) <= 1 {
+			return true
+		}
+		// A context.Context should be the first parameter of a function.
+		// Flag any that show up after the first.
+		for _, arg := range fn.Type.Params.List[1:] {
+			if isPkgDot(arg.Type, "context", "Context") {
+				f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function")
+				break // only flag one
+			}
+		}
+		return true
+	})
+}
+
+// containsComments returns whether the interval [start, end) contains any
+// comments without "// MATCH " prefix.
+func (f *file) containsComments(start, end token.Pos) bool {
+	for _, cgroup := range f.f.Comments {
+		comments := cgroup.List
+		if comments[0].Slash >= end {
+			// All comments starting with this group are after end pos.
+			// (f.f.Comments is ordered by position, so we can stop early.)
+			return false
+		}
+		if comments[len(comments)-1].Slash < start {
+			// Comments group ends before start pos.
+			continue
+		}
+		for _, c := range comments {
+			if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// lintIfError looks for the redundant pattern
+//
+//	if err := f(); err != nil {
+//		return err
+//	}
+//	return nil
+//
+// at the end of a block and suggests returning the error directly.
+func (f *file) lintIfError() {
+	f.walk(func(node ast.Node) bool {
+		switch v := node.(type) {
+		case *ast.BlockStmt:
+			for i := 0; i < len(v.List)-1; i++ {
+				// if var := whatever; var != nil { return var }
+				s, ok := v.List[i].(*ast.IfStmt)
+				if !ok || s.Body == nil || len(s.Body.List) != 1 || s.Else != nil {
+					continue
+				}
+				assign, ok := s.Init.(*ast.AssignStmt)
+				if !ok || len(assign.Lhs) != 1 || !(assign.Tok == token.DEFINE || assign.Tok == token.ASSIGN) {
+					continue
+				}
+				id, ok := assign.Lhs[0].(*ast.Ident)
+				if !ok {
+					continue
+				}
+				// The condition must be exactly `id != nil`.
+				expr, ok := s.Cond.(*ast.BinaryExpr)
+				if !ok || expr.Op != token.NEQ {
+					continue
+				}
+				if lhs, ok := expr.X.(*ast.Ident); !ok || lhs.Name != id.Name {
+					continue
+				}
+				if rhs, ok := expr.Y.(*ast.Ident); !ok || rhs.Name != "nil" {
+					continue
+				}
+				// The body must be exactly `return id`.
+				r, ok := s.Body.List[0].(*ast.ReturnStmt)
+				if !ok || len(r.Results) != 1 {
+					continue
+				}
+				if r, ok := r.Results[0].(*ast.Ident); !ok || r.Name != id.Name {
+					continue
+				}
+
+				// return nil
+				r, ok = v.List[i+1].(*ast.ReturnStmt)
+				if !ok || len(r.Results) != 1 {
+					continue
+				}
+				if r, ok := r.Results[0].(*ast.Ident); !ok || r.Name != "nil" {
+					continue
+				}
+
+				// check if there are any comments explaining the construct, don't emit an error if there are some.
+				if f.containsComments(s.Pos(), r.Pos()) {
+					continue
+				}
+
+				f.errorf(v.List[i], 0.9, "redundant if ...; err != nil check, just return error instead.")
+			}
+		}
+		return true
+	})
+}
+
+// receiverType returns the named type of the method receiver, sans "*",
+// or "invalid-type" if fn.Recv is ill formed.
+func receiverType(fn *ast.FuncDecl) string {
+	switch e := fn.Recv.List[0].Type.(type) {
+	case *ast.Ident:
+		// Value receiver: func (t T) ...
+		return e.Name
+	case *ast.StarExpr:
+		// Pointer receiver: func (t *T) ...
+		if id, ok := e.X.(*ast.Ident); ok {
+			return id.Name
+		}
+	}
+	// The parser accepts much more than just the legal forms.
+	return "invalid-type"
+}
+
+// walk applies fn to each node in the file's AST, descending into a node's
+// children only while fn returns true.
+func (f *file) walk(fn func(ast.Node) bool) {
+	ast.Walk(walker(fn), f.f)
+}
+
+// render pretty-prints x (an AST node or other go/printer input) as Go
+// source, using the file's position information.
+func (f *file) render(x interface{}) string {
+	var buf bytes.Buffer
+	if err := printer.Fprint(&buf, f.fset, x); err != nil {
+		panic(err)
+	}
+	return buf.String()
+}
+
+// debugRender dumps x in the verbose go/ast debug format; for debugging only.
+func (f *file) debugRender(x interface{}) string {
+	var buf bytes.Buffer
+	if err := ast.Fprint(&buf, f.fset, x, nil); err != nil {
+		panic(err)
+	}
+	return buf.String()
+}
+
+// walker adapts a function to satisfy the ast.Visitor interface.
+// The function return whether the walk should proceed into the node's children.
+type walker func(ast.Node) bool
+
+// Visit implements ast.Visitor: a true result continues the walk into
+// node's children, a false result prunes the subtree.
+func (w walker) Visit(node ast.Node) ast.Visitor {
+	if w(node) {
+		return w
+	}
+	return nil
+}
+
+// isIdent reports whether expr is an identifier with the given name.
+func isIdent(expr ast.Expr, ident string) bool {
+	id, ok := expr.(*ast.Ident)
+	return ok && id.Name == ident
+}
+
+// isBlank returns whether id is the blank identifier "_".
+// If id == nil, the answer is false.
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" }
+
+// isPkgDot reports whether expr has the form pkg.name (a selector on a
+// plain identifier); it does not consult type information.
+func isPkgDot(expr ast.Expr, pkg, name string) bool {
+	sel, ok := expr.(*ast.SelectorExpr)
+	return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
+}
+
+// isOne reports whether expr is the integer literal 1.
+func isOne(expr ast.Expr) bool {
+	lit, ok := expr.(*ast.BasicLit)
+	return ok && lit.Kind == token.INT && lit.Value == "1"
+}
+
+// isCgoExported reports whether f is a top-level function carrying a
+// cgo "//export <name>" directive in its doc comment.
+func isCgoExported(f *ast.FuncDecl) bool {
+	if f.Recv != nil || f.Doc == nil {
+		return false
+	}
+
+	cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name)))
+	for _, c := range f.Doc.List {
+		if cgoExport.MatchString(c.Text) {
+			return true
+		}
+	}
+	return false
+}
+
+// basicTypeKinds maps each untyped constant kind to the name of its
+// default type (the type it assumes when assigned without conversion).
+var basicTypeKinds = map[types.BasicKind]string{
+	types.UntypedBool:    "bool",
+	types.UntypedInt:     "int",
+	types.UntypedRune:    "rune",
+	types.UntypedFloat:   "float64",
+	types.UntypedComplex: "complex128",
+	types.UntypedString:  "string",
+}
+
+// isUntypedConst reports whether expr is an untyped constant,
+// and indicates what its default type is.
+func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) {
+	// Re-evaluate expr outside of its context to see if it's untyped.
+	// (An expr evaluated within, for example, an assignment context will get the type of the LHS.)
+	exprStr := f.render(expr)
+	tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr)
+	if err != nil {
+		return "", false
+	}
+	if b, ok := tv.Type.(*types.Basic); ok {
+		if dt, ok := basicTypeKinds[b.Kind()]; ok {
+			return dt, true
+		}
+	}
+
+	return "", false
+}
+
+// firstLineOf renders the given node and returns its first line.
+// It will also match the indentation of another node.
+func (f *file) firstLineOf(node, match ast.Node) string {
+	line := f.render(node)
+	if i := strings.Index(line, "\n"); i >= 0 {
+		line = line[:i]
+	}
+	return f.indentOf(match) + line
+}
+
+// indentOf returns the leading whitespace (spaces and tabs) of the source
+// line on which node starts.
+func (f *file) indentOf(node ast.Node) string {
+	line := srcLine(f.src, f.fset.Position(node.Pos()))
+	for i, r := range line {
+		switch r {
+		case ' ', '\t':
+		default:
+			return line[:i]
+		}
+	}
+	return line // unusual or empty line
+}
+
+// srcLineWithMatch applies pattern to the source line on which node starts
+// and returns the submatches, or nil if the line does not match.
+func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) {
+	line := srcLine(f.src, f.fset.Position(node.Pos()))
+	line = strings.TrimSuffix(line, "\n")
+	rx := regexp.MustCompile(pattern)
+	return rx.FindStringSubmatch(line)
+}
+
+// srcLine returns the complete line at p, including the terminating newline.
+// p.Offset is assumed to be a valid offset into src.
+func srcLine(src []byte, p token.Position) string {
+	// Run to end of line in both directions if not at line start/end.
+	lo, hi := p.Offset, p.Offset+1
+	for lo > 0 && src[lo-1] != '\n' {
+		lo--
+	}
+	for hi < len(src) && src[hi-1] != '\n' {
+		hi++
+	}
+	return string(src[lo:hi])
+}
diff --git a/vendor/github.com/golang/lint/lint_test.go b/vendor/github.com/golang/lint/lint_test.go
new file mode 100644
index 0000000..92db596
--- /dev/null
+++ b/vendor/github.com/golang/lint/lint_test.go
@@ -0,0 +1,317 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package lint
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "path"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// lintMatch restricts which testdata files run, e.g. -lint.match=naming.
+var lintMatch = flag.String("lint.match", "", "restrict testdata matches to this pattern")
+
+// TestAll runs the linter over every file in testdata and checks that the
+// problems it reports exactly match the MATCH instructions embedded in
+// each file's comments (including suggested replacement lines).
+func TestAll(t *testing.T) {
+	l := new(Linter)
+	rx, err := regexp.Compile(*lintMatch)
+	if err != nil {
+		t.Fatalf("Bad -lint.match value %q: %v", *lintMatch, err)
+	}
+
+	baseDir := "testdata"
+	fis, err := ioutil.ReadDir(baseDir)
+	if err != nil {
+		t.Fatalf("ioutil.ReadDir: %v", err)
+	}
+	if len(fis) == 0 {
+		t.Fatalf("no files in %v", baseDir)
+	}
+	for _, fi := range fis {
+		if !rx.MatchString(fi.Name()) {
+			continue
+		}
+		//t.Logf("Testing %s", fi.Name())
+		src, err := ioutil.ReadFile(path.Join(baseDir, fi.Name()))
+		if err != nil {
+			t.Fatalf("Failed reading %s: %v", fi.Name(), err)
+		}
+
+		ins := parseInstructions(t, fi.Name(), src)
+		if ins == nil {
+			t.Errorf("Test file %v does not have instructions", fi.Name())
+			continue
+		}
+
+		ps, err := l.Lint(fi.Name(), src)
+		if err != nil {
+			t.Errorf("Linting %s: %v", fi.Name(), err)
+			continue
+		}
+
+		// Match each expected instruction against the reported problems,
+		// removing matched problems so leftovers can be reported below.
+		for _, in := range ins {
+			ok := false
+			for i, p := range ps {
+				if p.Position.Line != in.Line {
+					continue
+				}
+				if in.Match.MatchString(p.Text) {
+					// check replacement if we are expecting one
+					if in.Replacement != "" {
+						// ignore any inline comments, since that would be recursive
+						r := p.ReplacementLine
+						if i := strings.Index(r, " //"); i >= 0 {
+							r = r[:i]
+						}
+						if r != in.Replacement {
+							t.Errorf("Lint failed at %s:%d; got replacement %q, want %q", fi.Name(), in.Line, r, in.Replacement)
+						}
+					}
+
+					// remove this problem from ps
+					copy(ps[i:], ps[i+1:])
+					ps = ps[:len(ps)-1]
+
+					//t.Logf("/%v/ matched at %s:%d", in.Match, fi.Name(), in.Line)
+					ok = true
+					break
+				}
+			}
+			if !ok {
+				t.Errorf("Lint failed at %s:%d; /%v/ did not match", fi.Name(), in.Line, in.Match)
+			}
+		}
+		// Any problems not consumed above were not expected by the test file.
+		for _, p := range ps {
+			t.Errorf("Unexpected problem at %s:%d: %v", fi.Name(), p.Position.Line, p.Text)
+		}
+	}
+}
+
+// instruction describes one expected lint problem parsed from a testdata file.
+type instruction struct {
+	Line        int            // the line number this applies to
+	Match       *regexp.Regexp // what pattern to match
+	Replacement string         // what the suggested replacement line should be
+}
+
+// parseInstructions parses instructions from the comments in a Go source file.
+// It returns nil if none were parsed.
+func parseInstructions(t *testing.T, filename string, src []byte) []instruction {
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+	if err != nil {
+		t.Fatalf("Test file %v does not parse: %v", filename, err)
+	}
+	var ins []instruction
+	for _, cg := range f.Comments {
+		ln := fset.Position(cg.Pos()).Line
+		raw := cg.Text()
+		for _, line := range strings.Split(raw, "\n") {
+			if line == "" || strings.HasPrefix(line, "#") {
+				continue
+			}
+			if line == "OK" && ins == nil {
+				// so our return value will be non-nil
+				ins = make([]instruction, 0)
+				continue
+			}
+			if strings.Contains(line, "MATCH") {
+				rx, err := extractPattern(line)
+				if err != nil {
+					t.Fatalf("At %v:%d: %v", filename, ln, err)
+				}
+				matchLine := ln
+				if i := strings.Index(line, "MATCH:"); i >= 0 {
+					// This is a match for a different line.
+					lns := strings.TrimPrefix(line[i:], "MATCH:")
+					lns = lns[:strings.Index(lns, " ")]
+					matchLine, err = strconv.Atoi(lns)
+					if err != nil {
+						t.Fatalf("Bad match line number %q at %v:%d: %v", lns, filename, ln, err)
+					}
+				}
+				var repl string
+				if r, ok := extractReplacement(line); ok {
+					repl = r
+				}
+				ins = append(ins, instruction{
+					Line:        matchLine,
+					Match:       rx,
+					Replacement: repl,
+				})
+			}
+		}
+	}
+	return ins
+}
+
+// extractPattern compiles the regexp between the first and last '/' in a
+// MATCH instruction line.
+func extractPattern(line string) (*regexp.Regexp, error) {
+	a, b := strings.Index(line, "/"), strings.LastIndex(line, "/")
+	if a == -1 || a == b {
+		return nil, fmt.Errorf("malformed match instruction %q", line)
+	}
+	pat := line[a+1 : b]
+	rx, err := regexp.Compile(pat)
+	if err != nil {
+		return nil, fmt.Errorf("bad match pattern %q: %v", pat, err)
+	}
+	return rx, nil
+}
+
+// extractReplacement extracts the expected replacement line from a MATCH
+// instruction, if present.
+func extractReplacement(line string) (string, bool) {
+	// Look for this:  / -> `
+	// (the end of a match and start of a backtick string),
+	// and then the closing backtick.
+	const start = "/ -> `"
+	a, b := strings.Index(line, start), strings.LastIndex(line, "`")
+	if a < 0 || a > b {
+		return "", false
+	}
+	return line[a+len(start) : b], true
+}
+
+// render pretty-prints x as Go source using fset; test-local helper.
+func render(fset *token.FileSet, x interface{}) string {
+	var buf bytes.Buffer
+	if err := printer.Fprint(&buf, fset, x); err != nil {
+		panic(err)
+	}
+	return buf.String()
+}
+
+// TestLine checks that srcLine expands an arbitrary offset to the full
+// enclosing line, with and without a trailing newline.
+func TestLine(t *testing.T) {
+	tests := []struct {
+		src    string
+		offset int
+		want   string
+	}{
+		{"single line file", 5, "single line file"},
+		{"single line file with newline\n", 5, "single line file with newline\n"},
+		{"first\nsecond\nthird\n", 2, "first\n"},
+		{"first\nsecond\nthird\n", 9, "second\n"},
+		{"first\nsecond\nthird\n", 14, "third\n"},
+		{"first\nsecond\nthird with no newline", 16, "third with no newline"},
+		{"first byte\n", 0, "first byte\n"},
+	}
+	for _, test := range tests {
+		got := srcLine([]byte(test.src), token.Position{Offset: test.offset})
+		if got != test.want {
+			t.Errorf("srcLine(%q, offset=%d) = %q, want %q", test.src, test.offset, got, test.want)
+		}
+	}
+}
+
+// TestLintName checks lintName's underscore removal and initialism
+// normalization (ID, UID, API, RPC, ...), including edge cases around
+// leading/trailing underscores and digit-adjacent underscores.
+func TestLintName(t *testing.T) {
+	tests := []struct {
+		name, want string
+	}{
+		{"foo_bar", "fooBar"},
+		{"foo_bar_baz", "fooBarBaz"},
+		{"Foo_bar", "FooBar"},
+		{"foo_WiFi", "fooWiFi"},
+		{"id", "id"},
+		{"Id", "ID"},
+		{"foo_id", "fooID"},
+		{"fooId", "fooID"},
+		{"fooUid", "fooUID"},
+		{"idFoo", "idFoo"},
+		{"uidFoo", "uidFoo"},
+		{"midIdDle", "midIDDle"},
+		{"APIProxy", "APIProxy"},
+		{"ApiProxy", "APIProxy"},
+		{"apiProxy", "apiProxy"},
+		{"_Leading", "_Leading"},
+		{"___Leading", "_Leading"},
+		{"trailing_", "trailing"},
+		{"trailing___", "trailing"},
+		{"a_b", "aB"},
+		{"a__b", "aB"},
+		{"a___b", "aB"},
+		{"Rpc1150", "RPC1150"},
+		{"case3_1", "case3_1"},
+		{"case3__1", "case3_1"},
+		{"IEEE802_16bit", "IEEE802_16bit"},
+		{"IEEE802_16Bit", "IEEE802_16Bit"},
+	}
+	for _, test := range tests {
+		got := lintName(test.name)
+		if got != test.want {
+			t.Errorf("lintName(%q) = %q, want %q", test.name, got, test.want)
+		}
+	}
+}
+
+// TestExportedType type-checks a small fixture package (which deliberately
+// shadows the builtin "string") and verifies exportedType's verdict for a
+// range of type expressions evaluated in that package's scope.
+func TestExportedType(t *testing.T) {
+	tests := []struct {
+		typString string
+		exp       bool
+	}{
+		{"int", true},
+		{"string", false}, // references the shadowed builtin "string"
+		{"T", true},
+		{"t", false},
+		{"*T", true},
+		{"*t", false},
+		{"map[int]complex128", true},
+	}
+	for _, test := range tests {
+		src := `package foo; type T int; type t int; type string struct{}`
+		fset := token.NewFileSet()
+		file, err := parser.ParseFile(fset, "foo.go", src, 0)
+		if err != nil {
+			t.Fatalf("Parsing %q: %v", src, err)
+		}
+		// use the package name as package path
+		config := &types.Config{}
+		pkg, err := config.Check(file.Name.Name, fset, []*ast.File{file}, nil)
+		if err != nil {
+			t.Fatalf("Type checking %q: %v", src, err)
+		}
+		tv, err := types.Eval(fset, pkg, token.NoPos, test.typString)
+		if err != nil {
+			t.Errorf("types.Eval(%q): %v", test.typString, err)
+			continue
+		}
+		if got := exportedType(tv.Type); got != test.exp {
+			t.Errorf("exportedType(%v) = %t, want %t", tv.Type, got, test.exp)
+		}
+	}
+}
+
+// TestIsGenerated checks detection of the standard generated-code marker.
+// NOTE: several cases differ only in whitespace (double vs single space
+// before "DO NOT", leading tab/space before the comment) — those are the
+// point of the test, not typos.
+func TestIsGenerated(t *testing.T) {
+	tests := []struct {
+		source    string
+		generated bool
+	}{
+		{"// Code Generated by some tool. DO NOT EDIT.", false},
+		{"// Code generated by some tool. DO NOT EDIT.", true},
+		{"// Code generated by some tool. DO NOT EDIT", false},
+		{"// Code generated  DO NOT EDIT.", true},
+		{"// Code generated DO NOT EDIT.", false},
+		{"\t\t// Code generated by some tool. DO NOT EDIT.\npackage foo\n", false},
+		{"// Code generated by some tool. DO NOT EDIT.\npackage foo\n", true},
+		{"package foo\n// Code generated by some tool. DO NOT EDIT.\ntype foo int\n", true},
+		{"package foo\n // Code generated by some tool. DO NOT EDIT.\ntype foo int\n", false},
+		{"package foo\n// Code generated by some tool. DO NOT EDIT. \ntype foo int\n", false},
+		{"package foo\ntype foo int\n// Code generated by some tool. DO NOT EDIT.\n", true},
+		{"package foo\ntype foo int\n// Code generated by some tool. DO NOT EDIT.", true},
+	}
+
+	for i, test := range tests {
+		got := isGenerated([]byte(test.source))
+		if got != test.generated {
+			t.Errorf("test %d, isGenerated() = %v, want %v", i, got, test.generated)
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore
new file mode 100644
index 0000000..c7dd405
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/.gitignore
@@ -0,0 +1,17 @@
+.DS_Store
+*.[568ao]
+*.ao
+*.so
+*.pyc
+._*
+.nfs.*
+[568a].out
+*~
+*.orig
+core
+_obj
+_test
+_testmain.go
+
+# Conformance test output and transient files.
+conformance/failing_tests.txt
diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml
new file mode 100644
index 0000000..fd650b2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/.travis.yml
@@ -0,0 +1,32 @@
+sudo: false
+language: go
+go:
+- 1.9.x
+- 1.10.x
+- 1.11.x
+- 1.x
+
+install:
+ - go get -v -d google.golang.org/grpc
+ - go get -v -d -t github.com/golang/protobuf/...
+ - curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protoc-3.6.1-linux-x86_64.zip -o /tmp/protoc.zip
+ - unzip /tmp/protoc.zip -d "$HOME"/protoc
+ - mkdir -p "$HOME"/src && ln -s "$HOME"/protoc "$HOME"/src/protobuf
+
+env:
+ - PATH=$HOME/protoc/bin:$PATH
+
+script:
+ - make all
+ - make regenerate
+ # TODO(tamird): When https://github.com/travis-ci/gimme/pull/130 is
+ # released, make this look for "1.x".
+ - if [[ "$TRAVIS_GO_VERSION" == 1.10* ]]; then
+ if [[ "$(git status --porcelain 2>&1)" != "" ]]; then
+ git status >&2;
+ git diff -a >&2;
+ exit 1;
+ fi;
+ echo "git status is clean.";
+ fi;
+ - make test
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile
new file mode 100644
index 0000000..109f1cb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/Makefile
@@ -0,0 +1,49 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+all: install
+
+install:
+ go install ./proto ./jsonpb ./ptypes ./protoc-gen-go
+
+test:
+ go test ./... ./protoc-gen-go/testdata
+ go test -tags purego ./... ./protoc-gen-go/testdata
+ go build ./protoc-gen-go/testdata/grpc/grpc.pb.go
+
+clean:
+ go clean ./...
+
+nuke:
+ go clean -i ./...
+
+regenerate:
+ ./regenerate.sh
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
new file mode 100644
index 0000000..04a52df
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -0,0 +1,289 @@
+# Go support for Protocol Buffers - Google's data interchange format
+
+[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
+[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates requires at least Go 1.9.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the `proto` package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
+ The compiler plugin, `protoc-gen-go`, will be installed in `$GOPATH/bin`
+ unless `$GOBIN` is set. It must be in your `$PATH` for the protocol
+ compiler, `protoc`, to find it.
+- If you need a particular version of `protoc-gen-go` (e.g., to match your
+ `proto` package version), one option is
+ ```shell
+ GIT_TAG="v1.2.0" # change as needed
+ go get -d -u github.com/golang/protobuf/protoc-gen-go
+ git -C "$(go env GOPATH)"/src/github.com/golang/protobuf checkout $GIT_TAG
+ go install github.com/golang/protobuf/protoc-gen-go
+ ```
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --go_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --go_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+## Packages and input paths ##
+
+The protocol buffer language has a concept of "packages" which does not
+correspond well to the Go notion of packages. In generated Go code,
+each source `.proto` file is associated with a single Go package. The
+name and import path for this package is specified with the `go_package`
+proto option:
+
+ option go_package = "github.com/golang/protobuf/ptypes/any";
+
+The protocol buffer compiler will attempt to derive a package name and
+import path if a `go_package` option is not present, but it is
+best to always specify one explicitly.
+
+There is a one-to-one relationship between source `.proto` files and
+generated `.pb.go` files, but any number of `.pb.go` files may be
+contained in the same Go package.
+
+The output name of a generated file is produced by replacing the
+`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`).
+However, the output directory is selected in one of two ways. Let
+us say we have `inputs/x.proto` with a `go_package` option of
+`github.com/golang/protobuf/p`. The corresponding output file may
+be:
+
+- Relative to the import path:
+
+```shell
+ protoc --go_out=. inputs/x.proto
+ # writes ./github.com/golang/protobuf/p/x.pb.go
+```
+
+ (This can work well with `--go_out=$GOPATH`.)
+
+- Relative to the input file:
+
+```shell
+protoc --go_out=paths=source_relative:. inputs/x.proto
+# generates ./inputs/x.pb.go
+```
+
+## Generated code ##
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers. Here is an edited
+version.
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+ a String method, and an Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+ syntax = "proto2";
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ }
+```
+
+To create and play with a Test object from the example package,
+
+```go
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ "path/to/example"
+ )
+
+ func main() {
+ test := &example.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &example.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+```
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+ protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
+
+- `paths=(import | source_relative)` - specifies how the paths of
+ generated files are structured. See the "Packages and input paths"
+ section above. The default is `import`.
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
+ load. The only plugin in this repo is `grpc`.
+- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
+ associated with Go package quux/shme. This is subject to the
+ import_prefix parameter.
+
+The following parameters are deprecated and should not be used:
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+ all imports.
+- `import_path=foo/bar` - used as the package if no input files
+ declare `go_package`. If it contains slashes, everything up to the
+ rightmost slash is ignored.
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --go_out argument to protoc:
+
+ protoc --go_out=plugins=grpc:. *.proto
+
+## Compatibility ##
+
+The library and the generated code are expected to be stable over time.
+However, we reserve the right to make breaking changes without notice for the
+following reasons:
+
+- Security. A security issue in the specification or implementation may come to
+ light whose resolution requires breaking compatibility. We reserve the right
+ to address such security issues.
+- Unspecified behavior. There are some aspects of the Protocol Buffers
+ specification that are undefined. Programs that depend on such unspecified
+ behavior may break in future releases.
+- Specification errors or changes. If it becomes necessary to address an
+ inconsistency, incompleteness, or change in the Protocol Buffers
+ specification, resolving the issue could affect the meaning or legality of
+ existing programs. We reserve the right to address such issues, including
+ updating the implementations.
+- Bugs. If the library has a bug that violates the specification, a program
+ that depends on the buggy behavior may break if the bug is fixed. We reserve
+ the right to fix such bugs.
+- Adding methods or fields to generated structs. These may conflict with field
+ names that already exist in a schema, causing applications to break. When the
+ code generator encounters a field in the schema that would collide with a
+ generated field or method name, the code generator will append an underscore
+ to the generated field or method name.
+- Adding, removing, or changing methods or fields in generated structs that
+ start with `XXX`. These parts of the generated code are exported out of
+ necessity, but should not be considered part of the public API.
+- Adding, removing, or changing unexported symbols in generated code.
+
+Any breaking changes outside of these will be announced 6 months in advance to
+protobuf@googlegroups.com.
+
+You should, whenever possible, use generated code created by the `protoc-gen-go`
+tool built at the same commit as the `proto` package. The `proto` package
+declares package-level constants in the form `ProtoPackageIsVersionX`.
+Application code and generated code may depend on one of these constants to
+ensure that compilation will fail if the available version of the proto library
+is too old. Whenever we make a change to the generated code that requires newer
+library support, in the same commit we will increment the version number of the
+generated code and declare a new package-level constant whose name incorporates
+the latest version number. Removing a compatibility constant is considered a
+breaking change and would be subject to the announcement policy stated above.
+
+The `protoc-gen-go/generator` package exposes a plugin interface,
+which is used by the gRPC code generation. This interface is not
+supported and is subject to incompatible changes without notice.
diff --git a/vendor/github.com/golang/protobuf/go.mod b/vendor/github.com/golang/protobuf/go.mod
new file mode 100644
index 0000000..eccf7fd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/go.mod
@@ -0,0 +1 @@
+module github.com/golang/protobuf
diff --git a/vendor/github.com/golang/protobuf/go.sum b/vendor/github.com/golang/protobuf/go.sum
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go
new file mode 100644
index 0000000..1bea4b6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/all_test.go
@@ -0,0 +1,2492 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ . "github.com/golang/protobuf/proto"
+ pb3 "github.com/golang/protobuf/proto/proto3_proto"
+ . "github.com/golang/protobuf/proto/test_proto"
+)
+
+var globalO *Buffer
+
+func old() *Buffer {
+ if globalO == nil {
+ globalO = NewBuffer(nil)
+ }
+ globalO.Reset()
+ return globalO
+}
+
+func equalbytes(b1, b2 []byte, t *testing.T) {
+ if len(b1) != len(b2) {
+ t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2))
+ return
+ }
+ for i := 0; i < len(b1); i++ {
+ if b1[i] != b2[i] {
+ t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2)
+ }
+ }
+}
+
+func initGoTestField() *GoTestField {
+ f := new(GoTestField)
+ f.Label = String("label")
+ f.Type = String("type")
+ return f
+}
+
+// These are all structurally equivalent but the tag numbers differ.
+// (It's remarkable that required, optional, and repeated all have
+// 8 letters.)
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
+ return &GoTest_RequiredGroup{
+ RequiredField: String("required"),
+ }
+}
+
+func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
+ return &GoTest_OptionalGroup{
+ RequiredField: String("optional"),
+ }
+}
+
+func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
+ return &GoTest_RepeatedGroup{
+ RequiredField: String("repeated"),
+ }
+}
+
+func initGoTest(setdefaults bool) *GoTest {
+ pb := new(GoTest)
+ if setdefaults {
+ pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
+ pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
+ pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
+ pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
+ pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
+ pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
+ pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
+ pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
+ pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
+ pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
+ pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
+ pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
+ pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
+ pb.F_Sfixed32Defaulted = Int32(Default_GoTest_F_Sfixed32Defaulted)
+ pb.F_Sfixed64Defaulted = Int64(Default_GoTest_F_Sfixed64Defaulted)
+ }
+
+ pb.Kind = GoTest_TIME.Enum()
+ pb.RequiredField = initGoTestField()
+ pb.F_BoolRequired = Bool(true)
+ pb.F_Int32Required = Int32(3)
+ pb.F_Int64Required = Int64(6)
+ pb.F_Fixed32Required = Uint32(32)
+ pb.F_Fixed64Required = Uint64(64)
+ pb.F_Uint32Required = Uint32(3232)
+ pb.F_Uint64Required = Uint64(6464)
+ pb.F_FloatRequired = Float32(3232)
+ pb.F_DoubleRequired = Float64(6464)
+ pb.F_StringRequired = String("string")
+ pb.F_BytesRequired = []byte("bytes")
+ pb.F_Sint32Required = Int32(-32)
+ pb.F_Sint64Required = Int64(-64)
+ pb.F_Sfixed32Required = Int32(-32)
+ pb.F_Sfixed64Required = Int64(-64)
+ pb.Requiredgroup = initGoTest_RequiredGroup()
+
+ return pb
+}
+
+func hex(c uint8) uint8 {
+ if '0' <= c && c <= '9' {
+ return c - '0'
+ }
+ if 'a' <= c && c <= 'f' {
+ return 10 + c - 'a'
+ }
+ if 'A' <= c && c <= 'F' {
+ return 10 + c - 'A'
+ }
+ return 0
+}
+
+func equal(b []byte, s string, t *testing.T) bool {
+ if 2*len(b) != len(s) {
+ // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t)
+ fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s))
+ return false
+ }
+ for i, j := 0, 0; i < len(b); i, j = i+1, j+2 {
+ x := hex(s[j])*16 + hex(s[j+1])
+ if b[i] != x {
+ // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t)
+ fmt.Printf("bad byte[%d]:%x %x", i, b[i], x)
+ return false
+ }
+ }
+ return true
+}
+
+func overify(t *testing.T, pb *GoTest, expected string) {
+ o := old()
+ err := o.Marshal(pb)
+ if err != nil {
+ fmt.Printf("overify marshal-1 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 1", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = o.Unmarshal(pbd)
+ if err != nil {
+ t.Fatalf("overify unmarshal err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ o.Reset()
+ err = o.Marshal(pbd)
+ if err != nil {
+ t.Errorf("overify marshal-2 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 2", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+// Simple tests for numeric encode/decode primitives (varint, etc.)
+func TestNumericPrimitives(t *testing.T) {
+ for i := uint64(0); i < 1e6; i += 111 {
+ o := old()
+ if o.EncodeVarint(i) != nil {
+ t.Error("EncodeVarint")
+ break
+ }
+ x, e := o.DecodeVarint()
+ if e != nil {
+ t.Fatal("DecodeVarint")
+ }
+ if x != i {
+ t.Fatal("varint decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed32(i) != nil {
+ t.Fatal("encFixed32")
+ }
+ x, e = o.DecodeFixed32()
+ if e != nil {
+ t.Fatal("decFixed32")
+ }
+ if x != i {
+ t.Fatal("fixed32 decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed64(i*1234567) != nil {
+ t.Error("encFixed64")
+ break
+ }
+ x, e = o.DecodeFixed64()
+ if e != nil {
+ t.Error("decFixed64")
+ break
+ }
+ if x != i*1234567 {
+ t.Error("fixed64 decode fail:", i*1234567, x)
+ break
+ }
+
+ o = old()
+ i32 := int32(i - 12345)
+ if o.EncodeZigzag32(uint64(i32)) != nil {
+ t.Fatal("EncodeZigzag32")
+ }
+ x, e = o.DecodeZigzag32()
+ if e != nil {
+ t.Fatal("DecodeZigzag32")
+ }
+ if x != uint64(uint32(i32)) {
+ t.Fatal("zigzag32 decode fail:", i32, x)
+ }
+
+ o = old()
+ i64 := int64(i - 12345)
+ if o.EncodeZigzag64(uint64(i64)) != nil {
+ t.Fatal("EncodeZigzag64")
+ }
+ x, e = o.DecodeZigzag64()
+ if e != nil {
+ t.Fatal("DecodeZigzag64")
+ }
+ if x != uint64(i64) {
+ t.Fatal("zigzag64 decode fail:", i64, x)
+ }
+ }
+}
+
+// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.
+type fakeMarshaler struct {
+ b []byte
+ err error
+}
+
+func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err }
+func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) }
+func (f *fakeMarshaler) ProtoMessage() {}
+func (f *fakeMarshaler) Reset() {}
+
+type msgWithFakeMarshaler struct {
+ M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"`
+}
+
+func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) }
+func (m *msgWithFakeMarshaler) ProtoMessage() {}
+func (m *msgWithFakeMarshaler) Reset() {}
+
+// Simple tests for proto messages that implement the Marshaler interface.
+func TestMarshalerEncoding(t *testing.T) {
+ tests := []struct {
+ name string
+ m Message
+ want []byte
+ errType reflect.Type
+ }{
+ {
+ name: "Marshaler that fails",
+ m: &fakeMarshaler{
+ err: errors.New("some marshal err"),
+ b: []byte{5, 6, 7},
+ },
+ // Since the Marshal method returned bytes, they should be written to the
+ // buffer. (For efficiency, we assume that Marshal implementations are
+ // always correct w.r.t. RequiredNotSetError and output.)
+ want: []byte{5, 6, 7},
+ errType: reflect.TypeOf(errors.New("some marshal err")),
+ },
+ {
+ name: "Marshaler that fails with RequiredNotSetError",
+ m: &msgWithFakeMarshaler{
+ M: &fakeMarshaler{
+ err: &RequiredNotSetError{},
+ b: []byte{5, 6, 7},
+ },
+ },
+ // Since there's an error that can be continued after,
+ // the buffer should be written.
+ want: []byte{
+ 10, 3, // for &msgWithFakeMarshaler
+ 5, 6, 7, // for &fakeMarshaler
+ },
+ errType: reflect.TypeOf(&RequiredNotSetError{}),
+ },
+ {
+ name: "Marshaler that succeeds",
+ m: &fakeMarshaler{
+ b: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ want: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ }
+ for _, test := range tests {
+ b := NewBuffer(nil)
+ err := b.Marshal(test.m)
+ if reflect.TypeOf(err) != test.errType {
+ t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType)
+ }
+ if !reflect.DeepEqual(test.want, b.Bytes()) {
+ t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
+ }
+ if size := Size(test.m); size != len(b.Bytes()) {
+ t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes()))
+ }
+
+ m, mErr := Marshal(test.m)
+ if !bytes.Equal(b.Bytes(), m) {
+ t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes())
+ }
+ if !reflect.DeepEqual(err, mErr) {
+ t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q",
+ test.name, fmt.Sprint(mErr), fmt.Sprint(err))
+ }
+ }
+}
+
+// Ensure that Buffer.Marshal uses O(N) memory for N messages
+func TestBufferMarshalAllocs(t *testing.T) {
+ value := &OtherMessage{Key: Int64(1)}
+ msg := &MyMessage{Count: Int32(1), Others: []*OtherMessage{value}}
+
+ reallocSize := func(t *testing.T, items int, prealloc int) (int64, int64) {
+ var b Buffer
+ b.SetBuf(make([]byte, 0, prealloc))
+
+ var allocSpace int64
+ prevCap := cap(b.Bytes())
+ for i := 0; i < items; i++ {
+ err := b.Marshal(msg)
+ if err != nil {
+ t.Errorf("Marshal err = %q", err)
+ break
+ }
+ if c := cap(b.Bytes()); prevCap != c {
+ allocSpace += int64(c)
+ prevCap = c
+ }
+ }
+ needSpace := int64(len(b.Bytes()))
+ return allocSpace, needSpace
+ }
+
+ for _, prealloc := range []int{0, 100, 10000} {
+ for _, items := range []int{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000} {
+ runtimeSpace, need := reallocSize(t, items, prealloc)
+ totalSpace := int64(prealloc) + runtimeSpace
+
+ runtimeRatio := float64(runtimeSpace) / float64(need)
+ totalRatio := float64(totalSpace) / float64(need)
+
+ if totalRatio < 1 || runtimeRatio > 4 {
+ t.Errorf("needed %dB, allocated %dB total (ratio %.1f), allocated %dB at runtime (ratio %.1f)",
+ need, totalSpace, totalRatio, runtimeSpace, runtimeRatio)
+ }
+ }
+ }
+}
+
+// Simple tests for bytes
+func TestBytesPrimitives(t *testing.T) {
+ o := old()
+ bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
+ if o.EncodeRawBytes(bytes) != nil {
+ t.Error("EncodeRawBytes")
+ }
+ decb, e := o.DecodeRawBytes(false)
+ if e != nil {
+ t.Error("DecodeRawBytes")
+ }
+ equalbytes(bytes, decb, t)
+}
+
+// Simple tests for strings
+func TestStringPrimitives(t *testing.T) {
+ o := old()
+ s := "now is the time"
+ if o.EncodeStringBytes(s) != nil {
+ t.Error("enc_string")
+ }
+ decs, e := o.DecodeStringBytes()
+ if e != nil {
+ t.Error("dec_string")
+ }
+ if s != decs {
+ t.Error("string encode/decode fail:", s, decs)
+ }
+}
+
+// Do we catch the "required bit not set" case?
+func TestRequiredBit(t *testing.T) {
+ o := old()
+ pb := new(GoTest)
+ err := o.Marshal(pb)
+ if err == nil {
+ t.Error("did not catch missing required fields")
+ } else if !strings.Contains(err.Error(), "Kind") {
+ t.Error("wrong error type:", err)
+ }
+}
+
+// Check that all fields are nil.
+// Clearly silly, and a residue from a more interesting test with an earlier,
+// different initialization property, but it once caught a compiler bug so
+// it lives.
+func checkInitialized(pb *GoTest, t *testing.T) {
+ if pb.F_BoolDefaulted != nil {
+ t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
+ }
+ if pb.F_Int32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
+ }
+ if pb.F_Int64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
+ }
+ if pb.F_Fixed32Defaulted != nil {
+ t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
+ }
+ if pb.F_Fixed64Defaulted != nil {
+ t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
+ }
+ if pb.F_Uint32Defaulted != nil {
+ t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
+ }
+ if pb.F_Uint64Defaulted != nil {
+ t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
+ }
+ if pb.F_FloatDefaulted != nil {
+ t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
+ }
+ if pb.F_DoubleDefaulted != nil {
+ t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
+ }
+ if pb.F_StringDefaulted != nil {
+ t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
+ }
+ if pb.F_BytesDefaulted != nil {
+ t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
+ }
+ if pb.F_Sint32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
+ }
+ if pb.F_Sint64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
+ }
+}
+
+// Does Reset() reset?
+func TestReset(t *testing.T) {
+ pb := initGoTest(true)
+ // muck with some values
+ pb.F_BoolDefaulted = Bool(false)
+ pb.F_Int32Defaulted = Int32(237)
+ pb.F_Int64Defaulted = Int64(12346)
+ pb.F_Fixed32Defaulted = Uint32(32000)
+ pb.F_Fixed64Defaulted = Uint64(666)
+ pb.F_Uint32Defaulted = Uint32(323232)
+ pb.F_Uint64Defaulted = nil
+ pb.F_FloatDefaulted = nil
+ pb.F_DoubleDefaulted = Float64(0)
+ pb.F_StringDefaulted = String("gotcha")
+ pb.F_BytesDefaulted = []byte("asdfasdf")
+ pb.F_Sint32Defaulted = Int32(123)
+ pb.F_Sint64Defaulted = Int64(789)
+ pb.Reset()
+ checkInitialized(pb, t)
+}
+
+// All required fields set, no defaults provided.
+func TestEncodeDecode1(t *testing.T) {
+ pb := initGoTest(false)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 0x20
+ "714000000000000000"+ // field 14, encoding 1, value 0x40
+ "78a019"+ // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
+ "b304"+ // field 70, encoding 3, start group
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // field 70, encoding 4, end group
+ "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff") // field 105, encoding 1, -64 fixed64
+}
+
+// All required fields set, defaults provided.
+func TestEncodeDecode2(t *testing.T) {
+ pb := initGoTest(true)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f"+ // field 403, encoding 0, value 127
+ "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32
+ "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64
+
+}
+
+// All default fields set to their default value by hand.
+// Each F_*Defaulted field is explicitly assigned the same value its proto
+// default declares; overify then checks both the exact wire bytes given
+// below and that decoding them reproduces the struct.
+func TestEncodeDecode3(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolDefaulted = Bool(true)
+ pb.F_Int32Defaulted = Int32(32)
+ pb.F_Int64Defaulted = Int64(64)
+ pb.F_Fixed32Defaulted = Uint32(320)
+ pb.F_Fixed64Defaulted = Uint64(640)
+ pb.F_Uint32Defaulted = Uint32(3200)
+ pb.F_Uint64Defaulted = Uint64(6400)
+ pb.F_FloatDefaulted = Float32(314159)
+ pb.F_DoubleDefaulted = Float64(271828)
+ pb.F_StringDefaulted = String("hello, \"world!\"\n")
+ pb.F_BytesDefaulted = []byte("Bignose")
+ pb.F_Sint32Defaulted = Int32(-32)
+ pb.F_Sint64Defaulted = Int64(-64)
+ pb.F_Sfixed32Defaulted = Int32(-32)
+ pb.F_Sfixed64Defaulted = Int64(-64)
+
+ // Expected encoding: required fields from initGoTest plus each defaulted
+ // field, in tag order (hex pairs annotated per field).
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f"+ // field 403, encoding 0, value 127
+ "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32
+ "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64
+
+}
+
+// All required fields set, defaults provided, all non-defaulted optional fields have values.
+// overify checks the exact wire encoding below, including the optional
+// group (field 90) and the TYPE_* optional fields (301-305).
+func TestEncodeDecode4(t *testing.T) {
+ pb := initGoTest(true)
+ pb.Table = String("hello")
+ pb.Param = Int32(7)
+ pb.OptionalField = initGoTestField()
+ pb.F_BoolOptional = Bool(true)
+ pb.F_Int32Optional = Int32(32)
+ pb.F_Int64Optional = Int64(64)
+ pb.F_Fixed32Optional = Uint32(3232)
+ pb.F_Fixed64Optional = Uint64(6464)
+ pb.F_Uint32Optional = Uint32(323232)
+ pb.F_Uint64Optional = Uint64(646464)
+ pb.F_FloatOptional = Float32(32.)
+ pb.F_DoubleOptional = Float64(64.)
+ pb.F_StringOptional = String("hello")
+ pb.F_BytesOptional = []byte("Bignose")
+ pb.F_Sint32Optional = Int32(-32)
+ pb.F_Sint64Optional = Int64(-64)
+ pb.F_Sfixed32Optional = Int32(-32)
+ pb.F_Sfixed64Optional = Int64(-64)
+ pb.Optionalgroup = initGoTest_OptionalGroup()
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
+ "1807"+ // field 3, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "f00101"+ // field 30, encoding 0, value 1
+ "f80120"+ // field 31, encoding 0, value 32
+ "800240"+ // field 32, encoding 0, value 64
+ "8d02a00c0000"+ // field 33, encoding 5, value 3232
+ "91024019000000000000"+ // field 34, encoding 1, value 6464
+ "9802a0dd13"+ // field 35, encoding 0, value 323232
+ "a002c0ba27"+ // field 36, encoding 0, value 646464
+ "ad0200000042"+ // field 37, encoding 5, value 32.0
+ "b1020000000000005040"+ // field 38, encoding 1, value 64.0
+ "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "d305"+ // start group field 90 level 1
+ "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
+ "d405"+ // end group field 90 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64
+ "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
+ "f0123f"+ // field 302, encoding 0, value 63
+ "f8127f"+ // field 303, encoding 0, value 127
+ "8513e0ffffff"+ // field 304, encoding 5, -32 fixed32
+ "8913c0ffffffffffffff"+ // field 305, encoding 1, -64 fixed64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f"+ // field 403, encoding 0, value 127
+ "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32
+ "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64
+
+}
+
+// All required fields set, defaults provided, all repeated fields given two values.
+// Non-packed repeated fields must appear on the wire once per element, in
+// order; overify checks the exact byte sequence.
+func TestEncodeDecode5(t *testing.T) {
+ pb := initGoTest(true)
+ pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
+ pb.F_BoolRepeated = []bool{false, true}
+ pb.F_Int32Repeated = []int32{32, 33}
+ pb.F_Int64Repeated = []int64{64, 65}
+ pb.F_Fixed32Repeated = []uint32{3232, 3333}
+ pb.F_Fixed64Repeated = []uint64{6464, 6565}
+ pb.F_Uint32Repeated = []uint32{323232, 333333}
+ pb.F_Uint64Repeated = []uint64{646464, 656565}
+ pb.F_FloatRepeated = []float32{32., 33.}
+ pb.F_DoubleRepeated = []float64{64., 65.}
+ pb.F_StringRepeated = []string{"hello", "sailor"}
+ pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
+ pb.F_Sint32Repeated = []int32{32, -32}
+ pb.F_Sint64Repeated = []int64{64, -64}
+ pb.F_Sfixed32Repeated = []int32{32, -32}
+ pb.F_Sfixed64Repeated = []int64{64, -64}
+ pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "a00100"+ // field 20, encoding 0, value 0
+ "a00101"+ // field 20, encoding 0, value 1
+ "a80120"+ // field 21, encoding 0, value 32
+ "a80121"+ // field 21, encoding 0, value 33
+ "b00140"+ // field 22, encoding 0, value 64
+ "b00141"+ // field 22, encoding 0, value 65
+ "bd01a00c0000"+ // field 23, encoding 5, value 3232
+ "bd01050d0000"+ // field 23, encoding 5, value 3333
+ "c1014019000000000000"+ // field 24, encoding 1, value 6464
+ "c101a519000000000000"+ // field 24, encoding 1, value 6565
+ "c801a0dd13"+ // field 25, encoding 0, value 323232
+ "c80195ac14"+ // field 25, encoding 0, value 333333
+ "d001c0ba27"+ // field 26, encoding 0, value 646464
+ "d001b58928"+ // field 26, encoding 0, value 656565
+ "dd0100000042"+ // field 27, encoding 5, value 32.0
+ "dd0100000442"+ // field 27, encoding 5, value 33.0
+ "e1010000000000005040"+ // field 28, encoding 1, value 64.0
+ "e1010000000000405040"+ // field 28, encoding 1, value 65.0
+ "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
+ "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64
+ "ca0c03"+"626967"+ // field 201, encoding 2, string "big"
+ "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
+ "d00c40"+ // field 202, encoding 0, value 32
+ "d00c3f"+ // field 202, encoding 0, value -32
+ "d80c8001"+ // field 203, encoding 0, value 64
+ "d80c7f"+ // field 203, encoding 0, value -64
+ "e50c20000000"+ // field 204, encoding 5, 32 fixed32
+ "e50ce0ffffff"+ // field 204, encoding 5, -32 fixed32
+ "e90c4000000000000000"+ // field 205, encoding 1, 64 fixed64
+ "e90cc0ffffffffffffff"+ // field 205, encoding 1, -64 fixed64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f"+ // field 403, encoding 0, value 127
+ "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32
+ "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64
+
+}
+
+// All required fields set, all packed repeated fields given two values.
+// Packed fields must encode as a single length-delimited record containing
+// all elements; overify checks the exact byte sequence.
+func TestEncodeDecode6(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolRepeatedPacked = []bool{false, true}
+ pb.F_Int32RepeatedPacked = []int32{32, 33}
+ pb.F_Int64RepeatedPacked = []int64{64, 65}
+ pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
+ pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
+ pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
+ pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
+ pb.F_FloatRepeatedPacked = []float32{32., 33.}
+ pb.F_DoubleRepeatedPacked = []float64{64., 65.}
+ pb.F_Sint32RepeatedPacked = []int32{32, -32}
+ pb.F_Sint64RepeatedPacked = []int64{64, -64}
+ pb.F_Sfixed32RepeatedPacked = []int32{32, -32}
+ pb.F_Sfixed64RepeatedPacked = []int64{64, -64}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
+ "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
+ "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
+ "aa0308"+ // field 53, encoding 2, 8 bytes
+ "a00c0000050d0000"+ // value 3232, value 3333
+ "b20310"+ // field 54, encoding 2, 16 bytes
+ "4019000000000000a519000000000000"+ // value 6464, value 6565
+ "ba0306"+ // field 55, encoding 2, 6 bytes
+ "a0dd1395ac14"+ // value 323232, value 333333
+ "c20306"+ // field 56, encoding 2, 6 bytes
+ "c0ba27b58928"+ // value 646464, value 656565
+ "ca0308"+ // field 57, encoding 2, 8 bytes
+ "0000004200000442"+ // value 32.0, value 33.0
+ "d20310"+ // field 58, encoding 2, 16 bytes
+ "00000000000050400000000000405040"+ // value 64.0, value 65.0
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64
+ "b21f02"+ // field 502, encoding 2, 2 bytes
+ "403f"+ // value 32, value -32
+ "ba1f03"+ // field 503, encoding 2, 3 bytes
+ "80017f"+ // value 64, value -64
+ "c21f08"+ // field 504, encoding 2, 8 bytes
+ "20000000e0ffffff"+ // value 32, value -32
+ "ca1f10"+ // field 505, encoding 2, 16 bytes
+ "4000000000000000c0ffffffffffffff") // value 64, value -64
+
+}
+
+// Test that we can encode empty bytes fields.
+// Empty (but non-nil) bytes values must survive a Marshal/Unmarshal round
+// trip as empty, non-nil slices in all three field kinds.
+func TestEncodeDecodeBytes1(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRequired = []byte{}
+ pb.F_BytesRepeated = [][]byte{{}}
+ pb.F_BytesOptional = []byte{}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
+ t.Error("required empty bytes field is incorrect")
+ }
+ // The repeated field must round-trip as exactly one non-nil element.
+ // The previous condition (len == 1 && elem == nil) only fired when the
+ // single element was nil and silently accepted a wrong element count;
+ // this matches the check used in TestEncodeDecodeBytes2.
+ if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("repeated empty bytes field is incorrect")
+ }
+ if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
+ t.Error("optional empty bytes field is incorrect")
+ }
+}
+
+// Test that we encode nil-valued fields of a repeated bytes field correctly.
+// Since entries in a repeated field cannot be nil, nil must mean empty value.
+func TestEncodeDecodeBytes2(t *testing.T) {
+ msg := initGoTest(false)
+ msg.F_BytesRepeated = [][]byte{nil}
+
+ wire, err := Marshal(msg)
+ if err != nil {
+ t.Error(err)
+ }
+
+ decoded := new(GoTest)
+ if err := Unmarshal(wire, decoded); err != nil {
+ t.Error(err)
+ }
+
+ // Exactly one element, and it must have been materialized as non-nil.
+ got := decoded.F_BytesRepeated
+ if len(got) != 1 || got[0] == nil {
+ t.Error("Unexpected value for repeated bytes field")
+ }
+}
+
+// Check that fields with tags the target message does not declare are
+// preserved in XXX_unrecognized and can themselves be decoded: a GoTestField
+// and a GoSkipTest are marshaled into the same buffer, the whole buffer is
+// decoded as a GoTestField, and the leftover bytes are re-decoded as the
+// original GoSkipTest. (The original header comment here was a copy-paste
+// from another test and did not describe this one.)
+func TestSkippingUnrecognizedFields(t *testing.T) {
+ o := old()
+ pb := initGoTestField()
+
+ // Marshal it normally.
+ o.Marshal(pb)
+
+ // Now new a GoSkipTest record.
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ // Marshal it into same buffer.
+ o.Marshal(skip)
+
+ pbd := new(GoTestField)
+ o.Unmarshal(pbd)
+
+ // The __unrecognized field should be a marshaling of GoSkipTest
+ skipd := new(GoSkipTest)
+
+ // Re-decode the skipped bytes and verify every field survived intact.
+ o.SetBuf(pbd.XXX_unrecognized)
+ o.Unmarshal(skipd)
+
+ if *skipd.SkipInt32 != *skip.SkipInt32 {
+ t.Error("skip int32", skipd.SkipInt32)
+ }
+ if *skipd.SkipFixed32 != *skip.SkipFixed32 {
+ t.Error("skip fixed32", skipd.SkipFixed32)
+ }
+ if *skipd.SkipFixed64 != *skip.SkipFixed64 {
+ t.Error("skip fixed64", skipd.SkipFixed64)
+ }
+ if *skipd.SkipString != *skip.SkipString {
+ t.Error("skip string", *skipd.SkipString)
+ }
+ if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
+ t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
+ }
+ if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
+ t.Error("skip group string", *skipd.Skipgroup.GroupString)
+ }
+}
+
+// Check that unrecognized fields of a submessage are preserved.
+// A NewMessage_Nested (which has a FoodGroup field) is decoded into an
+// OldMessage_Nested (which does not); the unknown field must survive Clone
+// and a re-marshal round trip back into a NewMessage.
+func TestSubmessageUnrecognizedFields(t *testing.T) {
+ nm := &NewMessage{
+ Nested: &NewMessage_Nested{
+ Name: String("Nigel"),
+ FoodGroup: String("carbs"),
+ },
+ }
+ b, err := Marshal(nm)
+ if err != nil {
+ t.Fatalf("Marshal of NewMessage: %v", err)
+ }
+
+ // Unmarshal into an OldMessage.
+ om := new(OldMessage)
+ if err := Unmarshal(b, om); err != nil {
+ t.Fatalf("Unmarshal to OldMessage: %v", err)
+ }
+ // "\x12\x05carbs" is the wire form of the FoodGroup field (tag 2,
+ // length-delimited, 5 bytes).
+ exp := &OldMessage{
+ Nested: &OldMessage_Nested{
+ Name: String("Nigel"),
+ // normal protocol buffer users should not do this
+ XXX_unrecognized: []byte("\x12\x05carbs"),
+ },
+ }
+ if !Equal(om, exp) {
+ t.Errorf("om = %v, want %v", om, exp)
+ }
+
+ // Clone the OldMessage.
+ om = Clone(om).(*OldMessage)
+ if !Equal(om, exp) {
+ t.Errorf("Clone(om) = %v, want %v", om, exp)
+ }
+
+ // Marshal the OldMessage, then unmarshal it into an empty NewMessage.
+ if b, err = Marshal(om); err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+ t.Logf("Marshal(%v) -> %q", om, b)
+ nm2 := new(NewMessage)
+ if err := Unmarshal(b, nm2); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ if !Equal(nm, nm2) {
+ t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
+ }
+}
+
+// Check that an int32 field can be upgraded to an int64 field.
+func TestNegativeInt32(t *testing.T) {
+ src := &OldMessage{
+ Num: Int32(-1),
+ }
+ wire, err := Marshal(src)
+ if err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+
+ // Check the size. It should be 11 bytes;
+ // 1 for the field/wire type, and 10 for the negative number.
+ if got := len(wire); got != 11 {
+ t.Errorf("%v marshaled as %q, wanted 11 bytes", src, wire)
+ }
+
+ // Unmarshal into a NewMessage, whose Num field is int64.
+ dst := new(NewMessage)
+ if err := Unmarshal(wire, dst); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ want := &NewMessage{
+ Num: Int64(-1),
+ }
+ if !Equal(dst, want) {
+ t.Errorf("nm = %v, want %v", dst, want)
+ }
+}
+
+// Check that we can grow an array (repeated field) to have many elements.
+// This test doesn't depend only on our encoding; for variety, it makes sure
+// we create, encode, and decode the correct contents explicitly. It's therefore
+// a bit messier.
+// This test also uses (and hence tests) the Marshal/Unmarshal functions
+// instead of the methods.
+func TestBigRepeated(t *testing.T) {
+ pb := initGoTest(true)
+
+ // Create the arrays
+ const N = 50 // Internally the library starts much smaller.
+ pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
+ pb.F_Sint64Repeated = make([]int64, N)
+ pb.F_Sint32Repeated = make([]int32, N)
+ pb.F_BytesRepeated = make([][]byte, N)
+ pb.F_StringRepeated = make([]string, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ pb.F_FloatRepeated = make([]float32, N)
+ pb.F_Uint64Repeated = make([]uint64, N)
+ pb.F_Uint32Repeated = make([]uint32, N)
+ pb.F_Fixed64Repeated = make([]uint64, N)
+ pb.F_Fixed32Repeated = make([]uint32, N)
+ pb.F_Int64Repeated = make([]int64, N)
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_BoolRepeated = make([]bool, N)
+ pb.RepeatedField = make([]*GoTestField, N)
+
+ // Fill in the arrays with checkable values.
+ igtf := initGoTestField()
+ igtrg := initGoTest_RepeatedGroup()
+ for i := 0; i < N; i++ {
+ pb.Repeatedgroup[i] = igtrg
+ pb.F_Sint64Repeated[i] = int64(i)
+ pb.F_Sint32Repeated[i] = int32(i)
+ s := fmt.Sprint(i)
+ pb.F_BytesRepeated[i] = []byte(s)
+ pb.F_StringRepeated[i] = s
+ pb.F_DoubleRepeated[i] = float64(i)
+ pb.F_FloatRepeated[i] = float32(i)
+ pb.F_Uint64Repeated[i] = uint64(i)
+ pb.F_Uint32Repeated[i] = uint32(i)
+ pb.F_Fixed64Repeated[i] = uint64(i)
+ pb.F_Fixed32Repeated[i] = uint32(i)
+ pb.F_Int64Repeated[i] = int64(i)
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_BoolRepeated[i] = i%2 == 0
+ pb.RepeatedField[i] = igtf
+ }
+
+ // Marshal.
+ buf, _ := Marshal(pb)
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ Unmarshal(buf, pbd)
+
+ // Check the checkable values
+ for i := uint64(0); i < N; i++ {
+ if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
+ t.Error("pbd.Repeatedgroup bad")
+ }
+ if x := uint64(pbd.F_Sint64Repeated[i]); x != i {
+ t.Error("pbd.F_Sint64Repeated bad", x, i)
+ }
+ if x := uint64(pbd.F_Sint32Repeated[i]); x != i {
+ t.Error("pbd.F_Sint32Repeated bad", x, i)
+ }
+ s := fmt.Sprint(i)
+ equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
+ if pbd.F_StringRepeated[i] != s {
+ // Fixed copy-paste bug: this message previously said
+ // "pbd.F_Sint32Repeated bad" although it reports the string field.
+ t.Error("pbd.F_StringRepeated bad", pbd.F_StringRepeated[i], i)
+ }
+ if x := uint64(pbd.F_DoubleRepeated[i]); x != i {
+ t.Error("pbd.F_DoubleRepeated bad", x, i)
+ }
+ if x := uint64(pbd.F_FloatRepeated[i]); x != i {
+ t.Error("pbd.F_FloatRepeated bad", x, i)
+ }
+ if x := pbd.F_Uint64Repeated[i]; x != i {
+ t.Error("pbd.F_Uint64Repeated bad", x, i)
+ }
+ if x := uint64(pbd.F_Uint32Repeated[i]); x != i {
+ t.Error("pbd.F_Uint32Repeated bad", x, i)
+ }
+ if x := pbd.F_Fixed64Repeated[i]; x != i {
+ t.Error("pbd.F_Fixed64Repeated bad", x, i)
+ }
+ if x := uint64(pbd.F_Fixed32Repeated[i]); x != i {
+ t.Error("pbd.F_Fixed32Repeated bad", x, i)
+ }
+ if x := uint64(pbd.F_Int64Repeated[i]); x != i {
+ t.Error("pbd.F_Int64Repeated bad", x, i)
+ }
+ if x := uint64(pbd.F_Int32Repeated[i]); x != i {
+ t.Error("pbd.F_Int32Repeated bad", x, i)
+ }
+ if x := pbd.F_BoolRepeated[i]; x != (i%2 == 0) {
+ t.Error("pbd.F_BoolRepeated bad", x, i)
+ }
+ if pbd.RepeatedField[i] == nil { // TODO: more checking?
+ t.Error("pbd.RepeatedField bad")
+ }
+ }
+}
+
+// TestBadWireTypeUnknown decodes a payload in which known fields appear with
+// both their declared wire type and a wrong one. Wrong-wire-type occurrences
+// must be routed into XXX_unrecognized rather than decoded, and after
+// DiscardUnknown only the correctly-typed values remain.
+func TestBadWireTypeUnknown(t *testing.T) {
+ var b []byte
+ fmt.Sscanf("0a01780d00000000080b101612036161611521000000202c220362626225370000002203636363214200000000000000584d5a036464645900000000000056405d63000000", "%x", &b)
+
+ m := new(MyMessage)
+ if err := Unmarshal(b, m); err != nil {
+ t.Errorf("unexpected Unmarshal error: %v", err)
+ }
+
+ // The wrong-wire-type records must have been preserved verbatim.
+ var unknown []byte
+ fmt.Sscanf("0a01780d0000000010161521000000202c2537000000214200000000000000584d5a036464645d63000000", "%x", &unknown)
+ if !bytes.Equal(m.XXX_unrecognized, unknown) {
+ t.Errorf("unknown bytes mismatch:\ngot %x\nwant %x", m.XXX_unrecognized, unknown)
+ }
+ DiscardUnknown(m)
+
+ want := &MyMessage{Count: Int32(11), Name: String("aaa"), Pet: []string{"bbb", "ccc"}, Bigfloat: Float64(88)}
+ if !Equal(m, want) {
+ t.Errorf("message mismatch:\ngot %v\nwant %v", m, want)
+ }
+}
+
+// encodeDecode marshals in and unmarshals the result into out, failing the
+// test (with msg for context) if either step returns an error.
+func encodeDecode(t *testing.T, in, out Message, msg string) {
+ wire, merr := Marshal(in)
+ if merr != nil {
+ t.Fatalf("failed marshaling %v: %v", msg, merr)
+ }
+ if uerr := Unmarshal(wire, out); uerr != nil {
+ t.Fatalf("failed unmarshaling %v: %v", msg, uerr)
+ }
+}
+
+// TestPackedNonPackedDecoderSwitching checks that the decoder accepts data
+// encoded in non-packed form for a packed field declaration and vice versa.
+func TestPackedNonPackedDecoderSwitching(t *testing.T) {
+ np, p := new(NonPackedTest), new(PackedTest)
+
+ // non-packed -> packed
+ np.A = []int32{0, 1, 1, 2, 3, 5}
+ encodeDecode(t, np, p, "non-packed -> packed")
+ if !reflect.DeepEqual(np.A, p.B) {
+ t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
+ }
+
+ // packed -> non-packed
+ np.Reset()
+ p.B = []int32{3, 1, 4, 1, 5, 9}
+ encodeDecode(t, p, np, "packed -> non-packed")
+ if !reflect.DeepEqual(p.B, np.A) {
+ t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
+ }
+}
+
+// TestProto1RepeatedGroup checks that marshaling a repeated group slice
+// containing a nil element fails with an explicit error rather than
+// panicking or silently skipping the entry.
+func TestProto1RepeatedGroup(t *testing.T) {
+ pb := &MessageList{
+ Message: []*MessageList_Message{
+ {
+ Name: String("blah"),
+ Count: Int32(7),
+ },
+ // NOTE: pb.Message[1] is a nil
+ nil,
+ },
+ }
+
+ o := old()
+ err := o.Marshal(pb)
+ if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
+ t.Fatalf("unexpected or no error when marshaling: %v", err)
+ }
+}
+
+// Test that enums work. Checks for a bug introduced by making enums
+// named types instead of int32: newInt32FromUint64 would crash with
+// a type mismatch in reflect.PointTo.
+func TestEnum(t *testing.T) {
+ pb := new(GoEnum)
+ pb.Foo = FOO_FOO1.Enum()
+ o := old()
+ if err := o.Marshal(pb); err != nil {
+ t.Fatal("error encoding enum:", err)
+ }
+ // Unmarshal reads back from the same Buffer that Marshal wrote into.
+ pb1 := new(GoEnum)
+ if err := o.Unmarshal(pb1); err != nil {
+ t.Fatal("error decoding enum:", err)
+ }
+ if *pb1.Foo != FOO_FOO1 {
+ t.Error("expected 7 but got ", *pb1.Foo)
+ }
+}
+
+// Enum types have String methods. Check that enum fields can be printed.
+// We don't care what the value actually is, just as long as it doesn't crash.
+func TestPrintingNilEnumFields(t *testing.T) {
+ pb := new(GoEnum)
+ _ = fmt.Sprintf("%+v", pb)
+}
+
+// Verify that absent required fields cause Marshal/Unmarshal to return errors.
+// Both directions must report *RequiredNotSetError naming the missing field.
+func TestRequiredFieldEnforcement(t *testing.T) {
+ pb := new(GoTestField)
+ _, err := Marshal(pb)
+ if err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ // A slightly sneaky, yet valid, proto. It encodes the same required field twice,
+ // so simply counting the required fields is insufficient.
+ // field 1, encoding 2, value "hi"
+ buf := []byte("\x0A\x02hi\x0A\x02hi")
+ err = Unmarshal(buf, pb)
+ if err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Type") && !strings.Contains(err.Error(), "{Unknown}") {
+ // TODO: remove unknown cases once we commit to the new unmarshaler.
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcementGroups(t *testing.T) {
+ pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}}
+ if _, err := Marshal(pb); err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ // 11 = field 1 start-group tag, 12 = field 1 end-group tag: an empty
+ // group, so the group's required field is absent.
+ buf := []byte{11, 12}
+ if err := Unmarshal(buf, pb); err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") && !strings.Contains(err.Error(), "Group.{Unknown}") {
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+// TestTypedNilMarshal checks the two typed-nil cases: a nil message pointer
+// must return ErrNil, and a oneof wrapper holding a nil message must return
+// a distinct error — neither may crash.
+func TestTypedNilMarshal(t *testing.T) {
+ // A typed nil should return ErrNil and not crash.
+ {
+ var m *GoEnum
+ if _, err := Marshal(m); err != ErrNil {
+ t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err)
+ }
+ }
+
+ {
+ m := &Communique{Union: &Communique_Msg{nil}}
+ if _, err := Marshal(m); err == nil || err == ErrNil {
+ t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err)
+ }
+ }
+}
+
+// A type that implements the Marshaler interface, but is not nillable.
+type nonNillableInt uint64
+
+// Marshal encodes the integer as a single proto varint.
+func (nni nonNillableInt) Marshal() ([]byte, error) {
+ return EncodeVarint(uint64(nni)), nil
+}
+
+// NNIMessage wraps a non-nillable Marshaler field; used by TestNilMarshaler.
+type NNIMessage struct {
+ nni nonNillableInt
+}
+
+// Reset, String and ProtoMessage satisfy the Message interface.
+func (*NNIMessage) Reset() {}
+func (*NNIMessage) String() string { return "" }
+func (*NNIMessage) ProtoMessage() {}
+
+// NMMessage is an empty message with no Marshaler fields; used by
+// TestNilMarshaler as the directly-marshalable case.
+type NMMessage struct{}
+
+// Reset, String and ProtoMessage satisfy the Message interface.
+func (*NMMessage) Reset() {}
+func (*NMMessage) String() string { return "" }
+func (*NMMessage) ProtoMessage() {}
+
+// Verify a type that uses the Marshaler interface, but has a nil pointer.
+func TestNilMarshaler(t *testing.T) {
+ // A struct with no Marshaler fields must marshal directly.
+ nmm := new(NMMessage)
+ _, err := Marshal(nmm)
+ if err != nil {
+ t.Error("unexpected error marshaling nmm: ", err)
+ }
+
+ // A struct whose Marshaler field is a non-nillable value type.
+ nnim := new(NNIMessage)
+ nnim.nni = 7
+ var _ Marshaler = nnim.nni // verify it is truly a Marshaler
+ _, err = Marshal(nnim)
+ if err != nil {
+ t.Error("unexpected error marshaling nnim: ", err)
+ }
+}
+
+// TestAllSetDefaults exercises SetDefaults with all scalar field types:
+// after the call, every unset field of m must carry the default declared in
+// the Defaults proto, matching the hand-built expected struct.
+func TestAllSetDefaults(t *testing.T) {
+ // Exercise SetDefaults with all scalar field types.
+ m := &Defaults{
+ // NaN != NaN, so override that here.
+ F_Nan: Float32(1.7),
+ }
+ expected := &Defaults{
+ F_Bool: Bool(true),
+ F_Int32: Int32(32),
+ F_Int64: Int64(64),
+ F_Fixed32: Uint32(320),
+ F_Fixed64: Uint64(640),
+ F_Uint32: Uint32(3200),
+ F_Uint64: Uint64(6400),
+ F_Float: Float32(314159),
+ F_Double: Float64(271828),
+ F_String: String(`hello, "world!"` + "\n"),
+ F_Bytes: []byte("Bignose"),
+ F_Sint32: Int32(-32),
+ F_Sint64: Int64(-64),
+ F_Enum: Defaults_GREEN.Enum(),
+ F_Pinf: Float32(float32(math.Inf(1))),
+ F_Ninf: Float32(float32(math.Inf(-1))),
+ F_Nan: Float32(1.7),
+ StrZero: String(""),
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected)
+ }
+}
+
+// Check that a set value is not overridden.
+func TestSetDefaultsWithSetField(t *testing.T) {
+ m := &Defaults{F_Int32: Int32(12)}
+ SetDefaults(m)
+ got := m.GetF_Int32()
+ if got != 12 {
+ t.Errorf("m.FInt32 = %v, want 12", got)
+ }
+}
+
+// TestSetDefaultsWithSubMessage checks that SetDefaults recurses into a set
+// submessage, filling its unset fields (Inner.Port) while leaving set ones
+// (Inner.Host, Key) untouched.
+func TestSetDefaultsWithSubMessage(t *testing.T) {
+ m := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ },
+ }
+ expected := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ Port: Int32(4000),
+ },
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+// TestSetDefaultsWithRepeatedSubMessage checks that SetDefaults recurses
+// into each element of a repeated message field.
+func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
+ m := &MyMessage{
+ RepInner: []*InnerMessage{{}},
+ }
+ expected := &MyMessage{
+ RepInner: []*InnerMessage{{
+ Port: Int32(4000),
+ }},
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+// TestSetDefaultWithRepeatedNonMessage checks that SetDefaults leaves
+// repeated scalar fields untouched.
+func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
+ m := &MyMessage{Pet: []string{"turtle", "wombat"}}
+ want := Clone(m)
+ SetDefaults(m)
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+// TestMaximumTagNumber round-trips a message whose field uses the largest
+// tag number the MaxTag proto declares (presumably the protobuf maximum —
+// confirm against the MaxTag definition).
+func TestMaximumTagNumber(t *testing.T) {
+ m := &MaxTag{
+ LastField: String("natural goat essence"),
+ }
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal failed: %v", err)
+ }
+ m2 := new(MaxTag)
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Fatalf("proto.Unmarshal failed: %v", err)
+ }
+ if got, want := m2.GetLastField(), *m.LastField; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// TestJSON checks encoding/json round trips of a generated message: the
+// exact JSON produced, that unmarshaling it restores an equal message, and
+// that a symbolic enum name ("GREEN") is accepted on input.
+func TestJSON(t *testing.T) {
+ m := &MyMessage{
+ Count: Int32(4),
+ Pet: []string{"bunny", "kitty"},
+ Inner: &InnerMessage{
+ Host: String("cauchy"),
+ },
+ Bikeshed: MyMessage_GREEN.Enum(),
+ }
+ const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
+
+ b, err := json.Marshal(m)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ s := string(b)
+ if s != expected {
+ t.Errorf("got %s\nwant %s", s, expected)
+ }
+
+ received := new(MyMessage)
+ if err := json.Unmarshal(b, received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+
+ // Test unmarshalling of JSON with symbolic enum name.
+ const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}`
+ received.Reset()
+ if err := json.Unmarshal([]byte(old), received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+}
+
+// TestBadWireType checks that a tag carrying an invalid wire type (6) is
+// rejected with an "unknown wire type" error.
+func TestBadWireType(t *testing.T) {
+ payload := []byte{7<<3 | 6} // field 7, wire type 6
+ msg := new(OtherMessage)
+ err := Unmarshal(payload, msg)
+ if err == nil {
+ t.Errorf("Unmarshal did not fail")
+ return
+ }
+ if !strings.Contains(err.Error(), "unknown wire type") {
+ t.Errorf("wrong error: %v", err)
+ }
+}
+
+// If a byte sequence has an invalid (negative) length, Unmarshal should not panic.
+func TestBytesWithInvalidLength(t *testing.T) {
+ payload := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}
+ // Only the absence of a panic matters; the error is deliberately ignored.
+ _ = Unmarshal(payload, new(MyMessage))
+}
+
+func TestLengthOverflow(t *testing.T) {
+ // Overflowing a length should not panic.
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestVarintOverflow(t *testing.T) {
+ // Overflowing a 64-bit length should not be allowed.
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}
+ if err := Unmarshal(b, new(MyMessage)); err == nil {
+ t.Fatalf("Overflowed uint64 length without error")
+ }
+}
+
+func TestBytesWithInvalidLengthInGroup(t *testing.T) {
+ // Overflowing a 64-bit length should not be allowed.
+ b := []byte{0xbb, 0x30, 0xb2, 0x30, 0xb0, 0xb2, 0x83, 0xf1, 0xb0, 0xb2, 0xef, 0xbf, 0xbd, 0x01}
+ if err := Unmarshal(b, new(MyMessage)); err == nil {
+ t.Fatalf("Overflowed uint64 length without error")
+ }
+}
+
+func TestUnmarshalFuzz(t *testing.T) {
+ const N = 1000
+ seed := time.Now().UnixNano()
+ t.Logf("RNG seed is %d", seed)
+ rng := rand.New(rand.NewSource(seed))
+ buf := make([]byte, 20)
+ for i := 0; i < N; i++ {
+ for j := range buf {
+ buf[j] = byte(rng.Intn(256))
+ }
+ fuzzUnmarshal(t, buf)
+ }
+}
+
+func TestMergeMessages(t *testing.T) {
+ pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
+ data, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ pb1 := new(MessageList)
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("first Unmarshal: %v", err)
+ }
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("second Unmarshal: %v", err)
+ }
+ if len(pb1.Message) != 1 {
+ t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message))
+ }
+
+ pb2 := new(MessageList)
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("first UnmarshalMerge: %v", err)
+ }
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("second UnmarshalMerge: %v", err)
+ }
+ if len(pb2.Message) != 2 {
+ t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message))
+ }
+}
+
+func TestExtensionMarshalOrder(t *testing.T) {
+ m := &MyMessage{Count: Int(123)}
+ if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ var orig []byte
+ for i := 0; i < 100; i++ {
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if i == 0 {
+ orig = b
+ continue
+ }
+ if !bytes.Equal(b, orig) {
+ t.Errorf("Bytes differ on attempt #%d", i)
+ }
+ }
+}
+
+func TestExtensionMapFieldMarshalDeterministic(t *testing.T) {
+ m := &MyMessage{Count: Int(123)}
+ if err := SetExtension(m, E_Ext_More, &Ext{MapField: map[int32]int32{1: 1, 2: 2, 3: 3, 4: 4}}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ marshal := func(m Message) []byte {
+ var b Buffer
+ b.SetDeterministic(true)
+ if err := b.Marshal(m); err != nil {
+ t.Fatalf("Marshal failed: %v", err)
+ }
+ return b.Bytes()
+ }
+
+ want := marshal(m)
+ for i := 0; i < 100; i++ {
+ if got := marshal(m); !bytes.Equal(got, want) {
+ t.Errorf("Marshal produced inconsistent output with determinism enabled (pass %d).\n got %v\nwant %v", i, got, want)
+ }
+ }
+}
+
+// Many extensions, because small maps might not iterate differently on each iteration.
+var exts = []*ExtensionDesc{
+ E_X201,
+ E_X202,
+ E_X203,
+ E_X204,
+ E_X205,
+ E_X206,
+ E_X207,
+ E_X208,
+ E_X209,
+ E_X210,
+ E_X211,
+ E_X212,
+ E_X213,
+ E_X214,
+ E_X215,
+ E_X216,
+ E_X217,
+ E_X218,
+ E_X219,
+ E_X220,
+ E_X221,
+ E_X222,
+ E_X223,
+ E_X224,
+ E_X225,
+ E_X226,
+ E_X227,
+ E_X228,
+ E_X229,
+ E_X230,
+ E_X231,
+ E_X232,
+ E_X233,
+ E_X234,
+ E_X235,
+ E_X236,
+ E_X237,
+ E_X238,
+ E_X239,
+ E_X240,
+ E_X241,
+ E_X242,
+ E_X243,
+ E_X244,
+ E_X245,
+ E_X246,
+ E_X247,
+ E_X248,
+ E_X249,
+ E_X250,
+}
+
+func TestMessageSetMarshalOrder(t *testing.T) {
+ m := &MyMessageSet{}
+ for _, x := range exts {
+ if err := SetExtension(m, x, &Empty{}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ }
+
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ for i := 0; i < 10; i++ {
+ b1, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(b1, buf) {
+ t.Errorf("Bytes differ on re-Marshal #%d", i)
+ }
+
+ m2 := &MyMessageSet{}
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ b2, err := Marshal(m2)
+ if err != nil {
+ t.Errorf("re-Marshal: %v", err)
+ }
+ if !bytes.Equal(b2, buf) {
+ t.Errorf("Bytes differ on round-trip #%d", i)
+ }
+ }
+}
+
+func TestUnmarshalMergesMessages(t *testing.T) {
+ // If a nested message occurs twice in the input,
+ // the fields should be merged when decoding.
+ a := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("polhode"),
+ Port: Int32(1234),
+ },
+ }
+ aData, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal(a): %v", err)
+ }
+ b := &OtherMessage{
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Connected: Bool(true),
+ },
+ }
+ bData, err := Marshal(b)
+ if err != nil {
+ t.Fatalf("Marshal(b): %v", err)
+ }
+ want := &OtherMessage{
+ Key: Int64(123),
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Port: Int32(1234),
+ Connected: Bool(true),
+ },
+ }
+ got := new(OtherMessage)
+ if err := Unmarshal(append(aData, bData...), got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !Equal(got, want) {
+ t.Errorf("\n got %v\nwant %v", got, want)
+ }
+}
+
+func TestUnmarshalMergesGroups(t *testing.T) {
+ // If a nested group occurs twice in the input,
+ // the fields should be merged when decoding.
+ a := &GroupNew{
+ G: &GroupNew_G{
+ X: Int32(7),
+ Y: Int32(8),
+ },
+ }
+ aData, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal(a): %v", err)
+ }
+ b := &GroupNew{
+ G: &GroupNew_G{
+ X: Int32(9),
+ },
+ }
+ bData, err := Marshal(b)
+ if err != nil {
+ t.Fatalf("Marshal(b): %v", err)
+ }
+ want := &GroupNew{
+ G: &GroupNew_G{
+ X: Int32(9),
+ Y: Int32(8),
+ },
+ }
+ got := new(GroupNew)
+ if err := Unmarshal(append(aData, bData...), got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !Equal(got, want) {
+ t.Errorf("\n got %v\nwant %v", got, want)
+ }
+}
+
+func TestEncodingSizes(t *testing.T) {
+ tests := []struct {
+ m Message
+ n int
+ }{
+ {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
+ {&Defaults{F_Int32: Int32(math.MinInt32)}, 11},
+ {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},
+ {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},
+ }
+ for _, test := range tests {
+ b, err := Marshal(test.m)
+ if err != nil {
+ t.Errorf("Marshal(%v): %v", test.m, err)
+ continue
+ }
+ if len(b) != test.n {
+ t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n)
+ }
+ }
+}
+
+func TestRequiredNotSetError(t *testing.T) {
+ pb := initGoTest(false)
+ pb.RequiredField.Label = nil
+ pb.F_Int32Required = nil
+ pb.F_Int64Required = nil
+
+ expected := "0807" + // field 1, encoding 0, value 7
+ "2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
+ "5001" + // field 10, encoding 0, value 1
+ "6d20000000" + // field 13, encoding 5, value 0x20
+ "714000000000000000" + // field 14, encoding 1, value 0x40
+ "78a019" + // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032" + // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45" + // field 17, encoding 5, value 3232.0
+ "9101000000000040b940" + // field 18, encoding 1, value 6464.0
+ "9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
+ "b304" + // field 70, encoding 3, start group
+ "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
+ "b404" + // field 70, encoding 4, end group
+ "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
+ "b0063f" + // field 102, encoding 0, 0x3f zigzag32
+ "b8067f" + // field 103, encoding 0, 0x7f zigzag64
+ "c506e0ffffff" + // field 104, encoding 5, -32 fixed32
+ "c906c0ffffffffffffff" // field 105, encoding 1, -64 fixed64
+
+ o := old()
+ bytes, err := Marshal(pb)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+ if !strings.Contains(err.Error(), "RequiredField.Label") {
+ t.Errorf("marshal-1 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 1", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = Unmarshal(bytes, pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if !strings.Contains(err.Error(), "RequiredField.Label") && !strings.Contains(err.Error(), "RequiredField.{Unknown}") {
+ t.Errorf("unmarshal wrong err msg: %v", err)
+ }
+ bytes, err = Marshal(pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if !strings.Contains(err.Error(), "RequiredField.Label") {
+ t.Errorf("marshal-2 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 2", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+func TestRequiredNotSetErrorWithBadWireTypes(t *testing.T) {
+ // Required field expects a varint, and properly found a varint.
+ if err := Unmarshal([]byte{0x08, 0x00}, new(GoEnum)); err != nil {
+ t.Errorf("Unmarshal = %v, want nil", err)
+ }
+ // Required field expects a varint, but found a fixed32 instead.
+ if err := Unmarshal([]byte{0x0d, 0x00, 0x00, 0x00, 0x00}, new(GoEnum)); err == nil {
+ t.Errorf("Unmarshal = nil, want RequiredNotSetError")
+ }
+ // Required field expects a varint, and found both a varint and fixed32 (ignored).
+ m := new(GoEnum)
+ if err := Unmarshal([]byte{0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00}, m); err != nil {
+ t.Errorf("Unmarshal = %v, want nil", err)
+ }
+ if !bytes.Equal(m.XXX_unrecognized, []byte{0x0d, 0x00, 0x00, 0x00, 0x00}) {
+ t.Errorf("expected fixed32 to appear as unknown bytes: %x", m.XXX_unrecognized)
+ }
+}
+
+func fuzzUnmarshal(t *testing.T, data []byte) {
+ defer func() {
+ if e := recover(); e != nil {
+ t.Errorf("These bytes caused a panic: %+v", data)
+ t.Logf("Stack:\n%s", debug.Stack())
+ t.FailNow()
+ }
+ }()
+
+ pb := new(MyMessage)
+ Unmarshal(data, pb)
+}
+
+func TestMapFieldMarshal(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // b should be the concatenation of these three byte sequences in some order.
+ parts := []string{
+ "\n\a\b\x01\x12\x03Rob",
+ "\n\a\b\x04\x12\x03Ian",
+ "\n\b\b\x08\x12\x04Dave",
+ }
+ ok := false
+ for i := range parts {
+ for j := range parts {
+ if j == i {
+ continue
+ }
+ for k := range parts {
+ if k == i || k == j {
+ continue
+ }
+ try := parts[i] + parts[j] + parts[k]
+ if bytes.Equal(b, []byte(try)) {
+ ok = true
+ break
+ }
+ }
+ }
+ }
+ if !ok {
+ t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
+ }
+ t.Logf("FYI b: %q", b)
+
+ (new(Buffer)).DebugPrint("Dump of b", b)
+}
+
+func TestMapFieldDeterministicMarshal(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ }
+
+ marshal := func(m Message) []byte {
+ var b Buffer
+ b.SetDeterministic(true)
+ if err := b.Marshal(m); err != nil {
+ t.Fatalf("Marshal failed: %v", err)
+ }
+ return b.Bytes()
+ }
+
+ want := marshal(m)
+ for i := 0; i < 10; i++ {
+ if got := marshal(m); !bytes.Equal(got, want) {
+ t.Errorf("Marshal produced inconsistent output with determinism enabled (pass %d).\n got %v\nwant %v", i, got, want)
+ }
+ }
+}
+
+func TestMapFieldRoundTrips(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ 0x7001: {F: Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{
+ false: []byte("that's not right!"),
+ true: []byte("aye, 'tis true!"),
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("FYI b: %q", b)
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !Equal(m, m2) {
+ t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", m, m2)
+ }
+}
+
+func TestMapFieldWithNil(t *testing.T) {
+ m1 := &MessageWithMap{
+ MsgMapping: map[int64]*FloatingPoint{
+ 1: nil,
+ },
+ }
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.MsgMapping[1]; !ok {
+ t.Error("msg_mapping[1] not present")
+ } else if v != nil {
+ t.Errorf("msg_mapping[1] not nil: %v", v)
+ }
+}
+
+func TestMapFieldWithNilBytes(t *testing.T) {
+ m1 := &MessageWithMap{
+ ByteMapping: map[bool][]byte{
+ false: {},
+ true: nil,
+ },
+ }
+ n := Size(m1)
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if n != len(b) {
+ t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b))
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.ByteMapping[false]; !ok {
+ t.Error("byte_mapping[false] not present")
+ } else if len(v) != 0 {
+ t.Errorf("byte_mapping[false] not empty: %#v", v)
+ }
+ if v, ok := m2.ByteMapping[true]; !ok {
+ t.Error("byte_mapping[true] not present")
+ } else if len(v) != 0 {
+ t.Errorf("byte_mapping[true] not empty: %#v", v)
+ }
+}
+
+func TestDecodeMapFieldMissingKey(t *testing.T) {
+ b := []byte{
+ 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes
+ // no key
+ 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m"
+ }
+ got := &MessageWithMap{}
+ err := Unmarshal(b, got)
+ if err != nil {
+ t.Fatalf("failed to marshal map with missing key: %v", err)
+ }
+ want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}}
+ if !Equal(got, want) {
+ t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want)
+ }
+}
+
+func TestDecodeMapFieldMissingValue(t *testing.T) {
+ b := []byte{
+ 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes
+ 0x08, 0x01, // varint key, value 1
+ // no value
+ }
+ got := &MessageWithMap{}
+ err := Unmarshal(b, got)
+ if err != nil {
+ t.Fatalf("failed to marshal map with missing value: %v", err)
+ }
+ want := &MessageWithMap{NameMapping: map[int32]string{1: ""}}
+ if !Equal(got, want) {
+ t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want)
+ }
+}
+
+func TestOneof(t *testing.T) {
+ m := &Communique{}
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of empty message with oneof: %v", err)
+ }
+ if len(b) != 0 {
+ t.Errorf("Marshal of empty message yielded too many bytes: %v", b)
+ }
+
+ m = &Communique{
+ Union: &Communique_Name{"Barry"},
+ }
+
+ // Round-trip.
+ b, err = Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of message with oneof: %v", err)
+ }
+ if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5)
+ t.Errorf("Incorrect marshal of message with oneof: %v", b)
+ }
+ m.Reset()
+ if err := Unmarshal(b, m); err != nil {
+ t.Fatalf("Unmarshal of message with oneof: %v", err)
+ }
+ if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" {
+ t.Errorf("After round trip, Union = %+v", m.Union)
+ }
+ if name := m.GetName(); name != "Barry" {
+ t.Errorf("After round trip, GetName = %q, want %q", name, "Barry")
+ }
+
+ // Let's try with a message in the oneof.
+ m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}}
+ b, err = Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of message with oneof set to message: %v", err)
+ }
+ if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16)
+ t.Errorf("Incorrect marshal of message with oneof set to message: %v", b)
+ }
+ m.Reset()
+ if err := Unmarshal(b, m); err != nil {
+ t.Fatalf("Unmarshal of message with oneof set to message: %v", err)
+ }
+ ss, ok := m.Union.(*Communique_Msg)
+ if !ok || ss.Msg.GetStringField() != "deep deep string" {
+ t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union)
+ }
+}
+
+func TestOneofNilBytes(t *testing.T) {
+ // A oneof with nil byte slice should marshal to tag + 0 (size), with no error.
+ m := &Communique{Union: &Communique_Data{Data: nil}}
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal failed: %v", err)
+ }
+ want := []byte{
+ 7<<3 | 2, // tag 7, wire type 2
+ 0, // size
+ }
+ if !bytes.Equal(b, want) {
+ t.Errorf("Wrong result of Marshal: got %x, want %x", b, want)
+ }
+}
+
+func TestInefficientPackedBool(t *testing.T) {
+ // https://github.com/golang/protobuf/issues/76
+ inp := []byte{
+ 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes
+ // Usually a bool should take a single byte,
+ // but it is permitted to be any varint.
+ 0xb9, 0x30,
+ }
+ if err := Unmarshal(inp, new(MoreRepeated)); err != nil {
+ t.Error(err)
+ }
+}
+
+// Make sure pure-reflect-based implementation handles
+// []int32-[]enum conversion correctly.
+func TestRepeatedEnum2(t *testing.T) {
+ pb := &RepeatedEnum{
+ Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+ }
+ b, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal failed: %v", err)
+ }
+ x := new(RepeatedEnum)
+ err = Unmarshal(b, x)
+ if err != nil {
+ t.Fatalf("Unmarshal failed: %v", err)
+ }
+ if !Equal(pb, x) {
+ t.Errorf("Incorrect result: want: %v got: %v", pb, x)
+ }
+}
+
+// TestConcurrentMarshal makes sure that it is safe to marshal
+// same message in multiple goroutines concurrently.
+func TestConcurrentMarshal(t *testing.T) {
+ pb := initGoTest(true)
+ const N = 100
+ b := make([][]byte, N)
+
+ var wg sync.WaitGroup
+ for i := 0; i < N; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ var err error
+ b[i], err = Marshal(pb)
+ if err != nil {
+ t.Errorf("marshal error: %v", err)
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ for i := 1; i < N; i++ {
+ if !bytes.Equal(b[0], b[i]) {
+ t.Errorf("concurrent marshal result not same: b[0] = %v, b[%d] = %v", b[0], i, b[i])
+ }
+ }
+}
+
+func TestInvalidUTF8(t *testing.T) {
+ const invalidUTF8 = "\xde\xad\xbe\xef\x80\x00\xff"
+ tests := []struct {
+ label string
+ proto2 Message
+ proto3 Message
+ want []byte
+ }{{
+ label: "Scalar",
+ proto2: &TestUTF8{Scalar: String(invalidUTF8)},
+ proto3: &pb3.TestUTF8{Scalar: invalidUTF8},
+ want: []byte{0x0a, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff},
+ }, {
+ label: "Vector",
+ proto2: &TestUTF8{Vector: []string{invalidUTF8}},
+ proto3: &pb3.TestUTF8{Vector: []string{invalidUTF8}},
+ want: []byte{0x12, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff},
+ }, {
+ label: "Oneof",
+ proto2: &TestUTF8{Oneof: &TestUTF8_Field{invalidUTF8}},
+ proto3: &pb3.TestUTF8{Oneof: &pb3.TestUTF8_Field{invalidUTF8}},
+ want: []byte{0x1a, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff},
+ }, {
+ label: "MapKey",
+ proto2: &TestUTF8{MapKey: map[string]int64{invalidUTF8: 0}},
+ proto3: &pb3.TestUTF8{MapKey: map[string]int64{invalidUTF8: 0}},
+ want: []byte{0x22, 0x0b, 0x0a, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff, 0x10, 0x00},
+ }, {
+ label: "MapValue",
+ proto2: &TestUTF8{MapValue: map[int64]string{0: invalidUTF8}},
+ proto3: &pb3.TestUTF8{MapValue: map[int64]string{0: invalidUTF8}},
+ want: []byte{0x2a, 0x0b, 0x08, 0x00, 0x12, 0x07, 0xde, 0xad, 0xbe, 0xef, 0x80, 0x00, 0xff},
+ }}
+
+ for _, tt := range tests {
+ // Proto2 should not validate UTF-8.
+ b, err := Marshal(tt.proto2)
+ if err != nil {
+ t.Errorf("Marshal(proto2.%s) = %v, want nil", tt.label, err)
+ }
+ if !bytes.Equal(b, tt.want) {
+ t.Errorf("Marshal(proto2.%s) = %x, want %x", tt.label, b, tt.want)
+ }
+
+ m := Clone(tt.proto2)
+ m.Reset()
+ if err = Unmarshal(tt.want, m); err != nil {
+ t.Errorf("Unmarshal(proto2.%s) = %v, want nil", tt.label, err)
+ }
+ if !Equal(m, tt.proto2) {
+ t.Errorf("proto2.%s: output mismatch:\ngot %v\nwant %v", tt.label, m, tt.proto2)
+ }
+
+ // Proto3 should validate UTF-8.
+ b, err = Marshal(tt.proto3)
+ if err == nil {
+ t.Errorf("Marshal(proto3.%s) = %v, want non-nil", tt.label, err)
+ }
+ if !bytes.Equal(b, tt.want) {
+ t.Errorf("Marshal(proto3.%s) = %x, want %x", tt.label, b, tt.want)
+ }
+
+ m = Clone(tt.proto3)
+ m.Reset()
+ err = Unmarshal(tt.want, m)
+ if err == nil {
+ t.Errorf("Unmarshal(proto3.%s) = %v, want non-nil", tt.label, err)
+ }
+ if !Equal(m, tt.proto3) {
+ t.Errorf("proto3.%s: output mismatch:\ngot %v\nwant %v", tt.label, m, tt.proto2)
+ }
+ }
+}
+
+func TestRequired(t *testing.T) {
+ // The F_BoolRequired field appears after all of the required fields.
+ // It should still be handled even after multiple required field violations.
+ m := &GoTest{F_BoolRequired: Bool(true)}
+ got, err := Marshal(m)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Errorf("Marshal() = %v, want RequiredNotSetError error", err)
+ }
+ if want := []byte{0x50, 0x01}; !bytes.Equal(got, want) {
+ t.Errorf("Marshal() = %x, want %x", got, want)
+ }
+
+ m = new(GoTest)
+ err = Unmarshal(got, m)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Errorf("Marshal() = %v, want RequiredNotSetError error", err)
+ }
+ if !m.GetF_BoolRequired() {
+ t.Error("m.F_BoolRequired = false, want true")
+ }
+}
+
+// Benchmarks
+
+func testMsg() *GoTest {
+ pb := initGoTest(true)
+ const N = 1000 // Internally the library starts much smaller.
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ for i := 0; i < N; i++ {
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_DoubleRepeated[i] = float64(i)
+ }
+ return pb
+}
+
+func bytesMsg() *GoTest {
+ pb := initGoTest(true)
+ buf := make([]byte, 4000)
+ for i := range buf {
+ buf[i] = byte(i)
+ }
+ pb.F_BytesDefaulted = buf
+ return pb
+}
+
+func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {
+ d, _ := marshal(pb)
+ b.SetBytes(int64(len(d)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ marshal(pb)
+ }
+}
+
+func benchmarkBufferMarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ p.Reset()
+ err := p.Marshal(pb0)
+ return p.Bytes(), err
+ })
+}
+
+func benchmarkSize(b *testing.B, pb Message) {
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ Size(pb)
+ return nil, nil
+ })
+}
+
+func newOf(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+ return reflect.New(in.Type().Elem()).Interface().(Message)
+}
+
+func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {
+ d, _ := Marshal(pb)
+ b.SetBytes(int64(len(d)))
+ pbd := newOf(pb)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ unmarshal(d, pbd)
+ }
+}
+
+func benchmarkBufferUnmarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {
+ p.SetBuf(d)
+ return p.Unmarshal(pb0)
+ })
+}
+
+// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}
+
+func BenchmarkMarshal(b *testing.B) {
+ benchmarkMarshal(b, testMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshal(b *testing.B) {
+ benchmarkBufferMarshal(b, testMsg())
+}
+
+func BenchmarkSize(b *testing.B) {
+ benchmarkSize(b, testMsg())
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ benchmarkUnmarshal(b, testMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshal(b *testing.B) {
+ benchmarkBufferUnmarshal(b, testMsg())
+}
+
+func BenchmarkMarshalBytes(b *testing.B) {
+ benchmarkMarshal(b, bytesMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshalBytes(b *testing.B) {
+ benchmarkBufferMarshal(b, bytesMsg())
+}
+
+func BenchmarkSizeBytes(b *testing.B) {
+ benchmarkSize(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalBytes(b *testing.B) {
+ benchmarkUnmarshal(b, bytesMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshalBytes(b *testing.B) {
+ benchmarkBufferUnmarshal(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
+ b.StopTimer()
+ pb := initGoTestField()
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ pbd := new(GoTestField)
+ p := NewBuffer(nil)
+ p.Marshal(pb)
+ p.Marshal(skip)
+ p2 := NewBuffer(nil)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ p2.SetBuf(p.Bytes())
+ p2.Unmarshal(pbd)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go
new file mode 100644
index 0000000..56fc97c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/any_test.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ testpb "github.com/golang/protobuf/proto/test_proto"
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+var (
+ expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
+ expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
+)
+
+// anyEqual reports whether two messages which may be google.protobuf.Any or may
+// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
+// comparison, because semantically equivalent messages may be marshaled to
+// binary in different tag order. Instead, trust that TextMarshaler with
+// ExpandAny option works and compare the text marshaling results.
+func anyEqual(got, want proto.Message) bool {
+ // if messages are proto.Equal, no need to marshal.
+ if proto.Equal(got, want) {
+ return true
+ }
+ g := expandedMarshaler.Text(got)
+ w := expandedMarshaler.Text(want)
+ return g == w
+}
+
+type golden struct {
+ m proto.Message
+ t, c string
+}
+
+var goldenMessages = makeGolden()
+
+func makeGolden() []golden {
+ nested := &pb.Nested{Bunny: "Monty"}
+ nb, err := proto.Marshal(nested)
+ if err != nil {
+ panic(err)
+ }
+ m1 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
+ }
+ m2 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
+ }
+ m3 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
+ }
+ m4 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
+ }
+ m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}
+
+ any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
+ proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
+ proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
+ any1b, err := proto.Marshal(any1)
+ if err != nil {
+ panic(err)
+ }
+ any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
+ proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
+ any2b, err := proto.Marshal(any2)
+ if err != nil {
+ panic(err)
+ }
+ m6 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
+ ManyThings: []*anypb.Any{
+ &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
+ &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
+ },
+ }
+
+ const (
+ m1Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+>
+`
+ m2Golden = `
+name: "David"
+result_count: 47
+anything: <
+ ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
+ bunny: "Monty"
+ >
+>
+`
+ m3Golden = `
+name: "David"
+result_count: 47
+anything: <
+ ["type.googleapis.com/\"/proto3_proto.Nested"]: <
+ bunny: "Monty"
+ >
+>
+`
+ m4Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+>
+`
+ m5Golden = `
+[type.googleapis.com/proto3_proto.Nested]: <
+ bunny: "Monty"
+>
+`
+ m6Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/test_proto.MyMessage]: <
+ count: 47
+ name: "David"
+ [test_proto.Ext.more]: <
+ data: "foo"
+ >
+ [test_proto.Ext.text]: "bar"
+ >
+>
+many_things: <
+ [type.googleapis.com/test_proto.MyMessage]: <
+ count: 42
+ bikeshed: GREEN
+ rep_bytes: "roboto"
+ [test_proto.Ext.more]: <
+ data: "baz"
+ >
+ >
+>
+many_things: <
+ [type.googleapis.com/test_proto.MyMessage]: <
+ count: 47
+ name: "David"
+ [test_proto.Ext.more]: <
+ data: "foo"
+ >
+ [test_proto.Ext.text]: "bar"
+ >
+>
+`
+ )
+ return []golden{
+ {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
+ {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
+ {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
+ {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
+ {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
+ {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
+ }
+}
+
+func TestMarshalGolden(t *testing.T) {
+ for _, tt := range goldenMessages {
+ if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {
+ t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want)
+ }
+ if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {
+ t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want)
+ }
+ }
+}
+
+func TestUnmarshalGolden(t *testing.T) {
+ for _, tt := range goldenMessages {
+ want := tt.m
+ got := proto.Clone(tt.m)
+ got.Reset()
+ if err := proto.UnmarshalText(tt.t, got); err != nil {
+ t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err)
+ }
+ if !anyEqual(got, want) {
+ t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want)
+ }
+ got.Reset()
+ if err := proto.UnmarshalText(tt.c, got); err != nil {
+ t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err)
+ }
+ if !anyEqual(got, want) {
+ t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want)
+ }
+ }
+}
+
+func TestMarshalUnknownAny(t *testing.T) {
+ m := &pb.Message{
+ Anything: &anypb.Any{
+ TypeUrl: "foo",
+ Value: []byte("bar"),
+ },
+ }
+ want := `anything: <
+ type_url: "foo"
+ value: "bar"
+>
+`
+ got := expandedMarshaler.Text(m)
+ if got != want {
+ t.Errorf("got\n`%s`\nwant\n`%s`", got, want)
+ }
+}
+
+func TestAmbiguousAny(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ type_url: "ttt/proto3_proto.Nested"
+ value: "\n\x05Monty"
+ `, pb)
+ t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
+ if err != nil {
+ t.Errorf("failed to parse ambiguous Any message: %v", err)
+ }
+}
+
+func TestUnmarshalOverwriteAny(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Rabbit of Caerbannog"
+ >
+ `, pb)
+ want := `line 7: Any message unpacked multiple times, or "type_url" already set`
+ if err.Error() != want {
+ t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
+ }
+}
+
+func TestUnmarshalAnyMixAndMatch(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ value: "\n\x05Monty"
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Rabbit of Caerbannog"
+ >
+ `, pb)
+ want := `line 5: Any message unpacked multiple times, or "value" already set`
+ if err.Error() != want {
+ t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generate Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go
new file mode 100644
index 0000000..b04989e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone_test.go
@@ -0,0 +1,406 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/test_proto"
+)
+
+var cloneTestMessage = &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+}
+
+func init() {
+ ext := &pb.Ext{
+ Data: proto.String("extension"),
+ }
+ if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
+ panic("SetExtension: " + err.Error())
+ }
+ if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_Text, proto.String("hello")); err != nil {
+ panic("SetExtension: " + err.Error())
+ }
+ if err := proto.SetExtension(cloneTestMessage, pb.E_Greeting, []string{"one", "two"}); err != nil {
+ panic("SetExtension: " + err.Error())
+ }
+}
+
+func TestClone(t *testing.T) {
+ // Create a clone using a marshal/unmarshal roundtrip.
+ vanilla := new(pb.MyMessage)
+ b, err := proto.Marshal(cloneTestMessage)
+ if err != nil {
+ t.Errorf("unexpected Marshal error: %v", err)
+ }
+ if err := proto.Unmarshal(b, vanilla); err != nil {
+ t.Errorf("unexpected Unarshal error: %v", err)
+ }
+
+ // Create a clone using Clone and verify that it is equal to the original.
+ m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
+ if !proto.Equal(m, cloneTestMessage) {
+ t.Fatalf("Clone(%v) = %v", cloneTestMessage, m)
+ }
+
+ // Mutate the clone, which should not affect the original.
+ x1, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Errorf("unexpected GetExtension(%v) error: %v", pb.E_Ext_More.Name, err)
+ }
+ x2, err := proto.GetExtension(m, pb.E_Ext_Text)
+ if err != nil {
+ t.Errorf("unexpected GetExtension(%v) error: %v", pb.E_Ext_Text.Name, err)
+ }
+ x3, err := proto.GetExtension(m, pb.E_Greeting)
+ if err != nil {
+ t.Errorf("unexpected GetExtension(%v) error: %v", pb.E_Greeting.Name, err)
+ }
+ *m.Inner.Port++
+ *(x1.(*pb.Ext)).Data = "blah blah"
+ *(x2.(*string)) = "goodbye"
+ x3.([]string)[0] = "zero"
+ if !proto.Equal(cloneTestMessage, vanilla) {
+ t.Fatalf("mutation on original detected:\ngot %v\nwant %v", cloneTestMessage, vanilla)
+ }
+}
+
+func TestCloneNil(t *testing.T) {
+ var m *pb.MyMessage
+ if c := proto.Clone(m); !proto.Equal(m, c) {
+ t.Errorf("Clone(%v) = %v", m, c)
+ }
+}
+
+var mergeTests = []struct {
+ src, dst, want proto.Message
+}{
+ {
+ src: &pb.MyMessage{
+ Count: proto.Int32(42),
+ },
+ dst: &pb.MyMessage{
+ Name: proto.String("Dave"),
+ },
+ want: &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ },
+ Pet: []string{"horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ dst: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {
+ // Explicitly test a src=nil field
+ Inner: nil,
+ },
+ },
+ },
+ want: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {},
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ RepBytes: [][]byte{[]byte("wow")},
+ },
+ dst: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham")},
+ },
+ want: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+ },
+ },
+ // Check that a scalar bytes field replaces rather than appends.
+ {
+ src: &pb.OtherMessage{Value: []byte("foo")},
+ dst: &pb.OtherMessage{Value: []byte("bar")},
+ want: &pb.OtherMessage{Value: []byte("foo")},
+ },
+ {
+ src: &pb.MessageWithMap{
+ NameMapping: map[int32]string{6: "Nigel"},
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(2.0),
+ },
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ dst: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Bruce", // should be overwritten
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(3.0),
+ Exact: proto.Bool(true),
+ }, // the entire message should be overwritten
+ },
+ },
+ want: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Nigel",
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(2.0),
+ },
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ },
+ // proto3 shouldn't merge zero values,
+ // in the same way that proto2 shouldn't merge nils.
+ {
+ src: &proto3pb.Message{
+ Name: "Aaron",
+ Data: []byte(""), // zero value, but not nil
+ },
+ dst: &proto3pb.Message{
+ HeightInCm: 176,
+ Data: []byte("texas!"),
+ },
+ want: &proto3pb.Message{
+ Name: "Aaron",
+ HeightInCm: 176,
+ Data: []byte("texas!"),
+ },
+ },
+ { // Oneof fields should merge by assignment.
+ src: &pb.Communique{Union: &pb.Communique_Number{41}},
+ dst: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
+ want: &pb.Communique{Union: &pb.Communique_Number{41}},
+ },
+ { // Oneof nil is the same as not set.
+ src: &pb.Communique{},
+ dst: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
+ want: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
+ },
+ {
+ src: &pb.Communique{Union: &pb.Communique_Number{1337}},
+ dst: &pb.Communique{},
+ want: &pb.Communique{Union: &pb.Communique_Number{1337}},
+ },
+ {
+ src: &pb.Communique{Union: &pb.Communique_Col{pb.MyMessage_RED}},
+ dst: &pb.Communique{},
+ want: &pb.Communique{Union: &pb.Communique_Col{pb.MyMessage_RED}},
+ },
+ {
+ src: &pb.Communique{Union: &pb.Communique_Data{[]byte("hello")}},
+ dst: &pb.Communique{},
+ want: &pb.Communique{Union: &pb.Communique_Data{[]byte("hello")}},
+ },
+ {
+ src: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}},
+ dst: &pb.Communique{},
+ want: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}},
+ },
+ {
+ src: &pb.Communique{Union: &pb.Communique_Msg{}},
+ dst: &pb.Communique{},
+ want: &pb.Communique{Union: &pb.Communique_Msg{}},
+ },
+ {
+ src: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123")}}},
+ dst: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}},
+ want: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123"), BytesField: []byte{1, 2, 3}}}},
+ },
+ {
+ src: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Cute: true}, // replace
+ "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
+ },
+ },
+ dst: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced
+ "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
+ },
+ },
+ want: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Cute: true},
+ "kay_b": &proto3pb.Nested{Bunny: "rabbit"},
+ "kay_c": &proto3pb.Nested{Bunny: "bunny"},
+ },
+ },
+ },
+ {
+ src: &pb.GoTest{
+ F_BoolRepeated: []bool{},
+ F_Int32Repeated: []int32{},
+ F_Int64Repeated: []int64{},
+ F_Uint32Repeated: []uint32{},
+ F_Uint64Repeated: []uint64{},
+ F_FloatRepeated: []float32{},
+ F_DoubleRepeated: []float64{},
+ F_StringRepeated: []string{},
+ F_BytesRepeated: [][]byte{},
+ },
+ dst: &pb.GoTest{},
+ want: &pb.GoTest{
+ F_BoolRepeated: []bool{},
+ F_Int32Repeated: []int32{},
+ F_Int64Repeated: []int64{},
+ F_Uint32Repeated: []uint32{},
+ F_Uint64Repeated: []uint64{},
+ F_FloatRepeated: []float32{},
+ F_DoubleRepeated: []float64{},
+ F_StringRepeated: []string{},
+ F_BytesRepeated: [][]byte{},
+ },
+ },
+ {
+ src: &pb.GoTest{},
+ dst: &pb.GoTest{
+ F_BoolRepeated: []bool{},
+ F_Int32Repeated: []int32{},
+ F_Int64Repeated: []int64{},
+ F_Uint32Repeated: []uint32{},
+ F_Uint64Repeated: []uint64{},
+ F_FloatRepeated: []float32{},
+ F_DoubleRepeated: []float64{},
+ F_StringRepeated: []string{},
+ F_BytesRepeated: [][]byte{},
+ },
+ want: &pb.GoTest{
+ F_BoolRepeated: []bool{},
+ F_Int32Repeated: []int32{},
+ F_Int64Repeated: []int64{},
+ F_Uint32Repeated: []uint32{},
+ F_Uint64Repeated: []uint64{},
+ F_FloatRepeated: []float32{},
+ F_DoubleRepeated: []float64{},
+ F_StringRepeated: []string{},
+ F_BytesRepeated: [][]byte{},
+ },
+ },
+ {
+ src: &pb.GoTest{
+ F_BytesRepeated: [][]byte{nil, []byte{}, []byte{0}},
+ },
+ dst: &pb.GoTest{},
+ want: &pb.GoTest{
+ F_BytesRepeated: [][]byte{nil, []byte{}, []byte{0}},
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ Others: []*pb.OtherMessage{},
+ },
+ dst: &pb.MyMessage{},
+ want: &pb.MyMessage{
+ Others: []*pb.OtherMessage{},
+ },
+ },
+}
+
+func TestMerge(t *testing.T) {
+ for _, m := range mergeTests {
+ got := proto.Clone(m.dst)
+ if !proto.Equal(got, m.dst) {
+ t.Errorf("Clone()\ngot %v\nwant %v", got, m.dst)
+ continue
+ }
+ proto.Merge(got, m.src)
+ if !proto.Equal(got, m.want) {
+ t.Errorf("Merge(%v, %v)\ngot %v\nwant %v", m.dst, m.src, got, m.want)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto have unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto have unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go
new file mode 100644
index 0000000..949be3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode_test.go
@@ -0,0 +1,255 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build go1.7
+
+package proto_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ tpb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+var msgBlackhole = new(tpb.Message)
+
+// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
+// 2 bytes long).
+func BenchmarkVarint32ArraySmall(b *testing.B) {
+ for i := uint(1); i <= 10; i++ {
+ dist := genInt32Dist([7]int{0, 3, 1}, 1<2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint (10 * 7 bits >= 64 bits)
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ // Emit 7 bits per byte, low-order group first; the high bit (0x80)
+ // marks continuation on every byte except the last.
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// It always returns nil.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ // Same 7-bits-per-byte scheme as the package-level EncodeVarint,
+ // appending directly into p.buf instead of a scratch array.
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ // One output byte per 7 bits of x; cases are ordered so the smallest
+ // (most common) values return first.
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+// It always returns nil.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ // Little-endian: least-significant byte first.
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+// It always returns nil.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ // Little-endian; only the low 32 bits of x are written.
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+// ZigZag interleaves positive and negative values (0, -1, 1, -2, ...)
+// so that small magnitudes encode to few varint bytes.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ // EncodeVarint always returns nil, so its error is safely ignored.
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ // Same length-prefixed layout as EncodeRawBytes; appending the string
+ // directly avoids an intermediate []byte conversion.
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ // The size is computed up front so the length prefix can be written
+ // before the message body.
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// isNil reports whether v holds a nil value.
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ // Only these kinds support IsNil; calling it on any other kind panics.
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go
new file mode 100644
index 0000000..a720947
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode_test.go
@@ -0,0 +1,85 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build go1.7
+
+package proto_test
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ tpb "github.com/golang/protobuf/proto/proto3_proto"
+ "github.com/golang/protobuf/ptypes"
+)
+
+var (
+ blackhole []byte
+)
+
+// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
+// same.
+func BenchmarkAny(b *testing.B) {
+ data := make([]byte, 1<<20)
+ quantum := 1 << 10
+ for i := uint(0); i <= 10; i++ {
+ b.Run(strconv.Itoa(quantum<unmarshal.
+}
+
+// TestMarshalUnmarshalRepeatedExtension verifies that a repeated extension
+// set on a message survives a marshal/unmarshal round trip unchanged.
+func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
+ // Add a repeated extension to the result.
+ tests := []struct {
+ name string
+ ext []*pb.ComplexExtension
+ }{
+ {
+ "two fields",
+ []*pb.ComplexExtension{
+ {First: proto.Int32(7)},
+ {Second: proto.Int32(11)},
+ },
+ },
+ {
+ "repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {Third: []int32{2000}},
+ },
+ },
+ {
+ "two fields and repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {First: proto.Int32(9)},
+ {Second: proto.Int32(21)},
+ {Third: []int32{2000}},
+ },
+ },
+ }
+ for _, test := range tests {
+ // Marshal message with a repeated extension.
+ msg1 := new(pb.OtherMessage)
+ err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
+ if err != nil {
+ t.Fatalf("[%s] Error setting extension: %v", test.name, err)
+ }
+ b, err := proto.Marshal(msg1)
+ if err != nil {
+ t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
+ }
+
+ // Unmarshal and read the merged proto.
+ msg2 := new(pb.OtherMessage)
+ err = proto.Unmarshal(b, msg2)
+ if err != nil {
+ t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
+ }
+ e, err := proto.GetExtension(msg2, pb.E_RComplex)
+ if err != nil {
+ t.Fatalf("[%s] Error getting extension: %v", test.name, err)
+ }
+ ext := e.([]*pb.ComplexExtension)
+ if ext == nil {
+ t.Fatalf("[%s] Invalid extension", test.name)
+ }
+ // Compare element-by-element: length first, then proto.Equal per slot.
+ if len(ext) != len(test.ext) {
+ t.Errorf("[%s] Wrong length of ComplexExtension: got: %v want: %v\n", test.name, len(ext), len(test.ext))
+ }
+ for i := range test.ext {
+ if !proto.Equal(ext[i], test.ext[i]) {
+ t.Errorf("[%s] Wrong value for ComplexExtension[%d]: got: %v want: %v\n", test.name, i, ext[i], test.ext[i])
+ }
+ }
+ }
+}
+
+// TestUnmarshalRepeatingNonRepeatedExtension checks that multiple wire-format
+// occurrences of a non-repeated extension are merged together on unmarshal.
+func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
+ // We may see multiple instances of the same extension in the wire
+ // format. For example, the proto compiler may encode custom options in
+ // this way. Here, we verify that we merge the extensions together.
+ tests := []struct {
+ name string
+ ext []*pb.ComplexExtension
+ }{
+ {
+ "two fields",
+ []*pb.ComplexExtension{
+ {First: proto.Int32(7)},
+ {Second: proto.Int32(11)},
+ },
+ },
+ {
+ "repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {Third: []int32{2000}},
+ },
+ },
+ {
+ "two fields and repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {First: proto.Int32(9)},
+ {Second: proto.Int32(21)},
+ {Third: []int32{2000}},
+ },
+ },
+ }
+ for _, test := range tests {
+ var buf bytes.Buffer
+ // want accumulates the merge of every instance via proto.Merge below.
+ var want pb.ComplexExtension
+
+ // Generate a serialized representation of a repeated extension
+ // by catenating bytes together.
+ for i, e := range test.ext {
+ // Merge to create the wanted proto.
+ proto.Merge(&want, e)
+
+ // serialize the message
+ msg := new(pb.OtherMessage)
+ err := proto.SetExtension(msg, pb.E_Complex, e)
+ if err != nil {
+ t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
+ }
+ b, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
+ }
+ buf.Write(b)
+ }
+
+ // Unmarshal and read the merged proto.
+ msg2 := new(pb.OtherMessage)
+ err := proto.Unmarshal(buf.Bytes(), msg2)
+ if err != nil {
+ t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
+ }
+ e, err := proto.GetExtension(msg2, pb.E_Complex)
+ if err != nil {
+ t.Fatalf("[%s] Error getting extension: %v", test.name, err)
+ }
+ ext := e.(*pb.ComplexExtension)
+ if ext == nil {
+ t.Fatalf("[%s] Invalid extension", test.name)
+ }
+ if !proto.Equal(ext, &want) {
+ t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, &want)
+ }
+ }
+}
+
+// TestClearAllExtensions verifies that ClearAllExtensions removes a
+// previously-set (unregistered) extension from a message.
+func TestClearAllExtensions(t *testing.T) {
+ // unregistered extension
+ desc := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 101010100,
+ Name: "emptyextension",
+ Tag: "varint,0,opt",
+ }
+ m := &pb.MyMessage{}
+ // Lifecycle check: absent -> set -> present -> cleared -> absent.
+ if proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
+ }
+ if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
+ t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
+ }
+ if !proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
+ }
+ proto.ClearAllExtensions(m)
+ if proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
+ }
+}
+
+// TestMarshalRace runs concurrent Marshal calls on a message whose extension
+// is still in undecoded (lazy) form; run with -race to detect data races.
+func TestMarshalRace(t *testing.T) {
+ ext := &pb.Ext{}
+ m := &pb.MyMessage{Count: proto.Int32(4)}
+ if err := proto.SetExtension(m, pb.E_Ext_More, ext); err != nil {
+ t.Fatalf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
+ }
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ t.Fatalf("Could not marshal message: %v", err)
+ }
+ if err := proto.Unmarshal(b, m); err != nil {
+ t.Fatalf("Could not unmarshal message: %v", err)
+ }
+ // after Unmarshal, the extension is in undecoded form.
+ // GetExtension will decode it lazily. Make sure this does
+ // not race against Marshal.
+
+ wg := sync.WaitGroup{}
+ // Buffered to the goroutine count so no sender ever blocks.
+ errs := make(chan error, 3)
+ for n := 3; n > 0; n-- {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, err := proto.Marshal(m)
+ errs <- err
+ }()
+ }
+ wg.Wait()
+ close(errs)
+
+ // Channel is closed above, so this range terminates.
+ for err = range errs {
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..fdd328b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+// RequiredNotSet always reports true; it tags the error so isNonFatal can
+// recognize it structurally, without referencing this concrete type.
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+// invalidUTF8Error reports a string field containing invalid UTF-8.
+// It is treated as non-fatal (see isNonFatal).
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+
+// InvalidUTF8 always reports true; it tags the error for isNonFatal.
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ // Structural checks: any error exposing these marker methods qualifies,
+ // not just the concrete types declared in this package.
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+// nonFatal accumulates the first non-fatal error seen during an operation.
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it was successful.
+// Otherwise it returns false for any fatal non-nil errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool // see SetDeterministic
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+// The backing array of buf is retained for reuse.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ // Only records the flag; the marshaling paths read p.deterministic.
+ p.deterministic = deterministic
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Each helper returns a pointer to a new copy of v, suitable for assigning
+// to an optional (pointer-typed) message field.
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ // Unknown value: fall back to its decimal representation.
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ // NOTE(review): data[0] panics on empty input — callers presumably never
+ // pass an empty JSON token here; confirm at call sites.
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ // Save the Buffer's state so it can be restored on exit; the dump
+ // temporarily repoints p at b.
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print("  ")
+ }
+
+ // Deliberately shadows the outer index: records the offset at
+ // which this field's tag begins, for the printouts below.
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ // A key is (field number << 3) | wire type.
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ // Print at most the first and last three bytes of long payloads.
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ // Restore the saved Buffer state.
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ // zeros=false: fields with no explicit default stay nil (see setDefaults).
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	// Fetch (or lazily build and cache) the default-field metadata for this
+	// message type. The check-then-store is not atomic, so two goroutines
+	// may build the same entry concurrently; the duplicate work is harmless
+	// because buildDefaultMessage is deterministic.
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	// Apply declared defaults (or zeros, if requested) to unset scalar fields.
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				// Copy the declared default so callers cannot mutate the
+				// shared default value through the field.
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	// Recurse into nested messages: pointer, slice-of-pointer, and
+	// map-with-pointer-value fields. Nil elements are left untouched.
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to a slice of the fields,
+	// with its scalar fields set to their proto-declared non-zero default values.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	// int32PtrType is the reflect.Type of *int32, used to distinguish plain
+	// int32 fields from named enum types (whose kind is also Int32).
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+// scalarField describes one scalar field of a message that may need a
+// default value applied by setDefaults.
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+// buildDefaultMessage walks t's proto properties and records which fields
+// are scalars with defaults and which are nested messages to recurse into.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		// Classify the field: scalar with a possible default, nested
+		// message, or neither.
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	// Parse the declared default string into a value of the field's type.
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		// An empty key set never invokes less, so leaving it nil is safe.
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
+
+// mapKeySorter implements sort.Interface over a slice of map keys,
+// using the comparison function chosen by mapKeys.
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+// Kinds not listed below (e.g. slices, maps, structs) always report false.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// Generated code references one of these constants so that an incompatible
+// generator/runtime pairing fails at compile time rather than at run time.
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	// Per-message-type metadata tables — presumably populated lazily by the
+	// marshal/unmarshal/merge/discard code paths elsewhere in this package.
+	// NOTE(review): confirm against the table-driven codec implementation.
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go
new file mode 100644
index 0000000..b1e1529
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/map_test.go
@@ -0,0 +1,70 @@
+package proto_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ ppb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+// TestMap decodes a hand-crafted hex wire encoding of a message containing a
+// string->string map and checks the decoded map, including entries with
+// empty keys and empty values.
+func TestMap(t *testing.T) {
+	var b []byte
+	// Sscanf with %x turns the hex literal into raw wire bytes.
+	fmt.Sscanf("a2010c0a044b657931120456616c31a201130a044b657932120556616c3261120456616c32a201240a044b6579330d05000000120556616c33621a0556616c3361120456616c331505000000a20100a201260a044b657934130a07536f6d6555524c1209536f6d655469746c651a08536e69707065743114", "%x", &b)
+
+	var m ppb.Message
+	if err := proto.Unmarshal(b, &m); err != nil {
+		t.Fatalf("proto.Unmarshal error: %v", err)
+	}
+
+	got := m.StringMap
+	want := map[string]string{
+		"":     "",
+		"Key1": "Val1",
+		"Key2": "Val2",
+		"Key3": "Val3",
+		"Key4": "",
+	}
+
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("maps differ:\ngot  %#v\nwant %#v", got, want)
+	}
+}
+
+// marshalled returns the wire encoding of an IntMaps message holding 1000
+// single-entry maps; it is used as shared input for the benchmarks below.
+// It panics on marshal failure, which is acceptable in test-only code.
+func marshalled() []byte {
+	m := &ppb.IntMaps{}
+	for i := 0; i < 1000; i++ {
+		m.Maps = append(m.Maps, &ppb.IntMap{
+			Rtt: map[int32]int32{1: 2},
+		})
+	}
+	b, err := proto.Marshal(m)
+	if err != nil {
+		panic(fmt.Sprintf("Can't marshal %+v: %v", m, err))
+	}
+	return b
+}
+
+// BenchmarkConcurrentMapUnmarshal measures Unmarshal of map-heavy messages
+// from many goroutines in parallel (exercises codec-table sharing).
+func BenchmarkConcurrentMapUnmarshal(b *testing.B) {
+	in := marshalled()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			var out ppb.IntMaps
+			if err := proto.Unmarshal(in, &out); err != nil {
+				b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
+			}
+		}
+	})
+}
+
+// BenchmarkSequentialMapUnmarshal is the single-goroutine baseline for
+// BenchmarkConcurrentMapUnmarshal.
+func BenchmarkSequentialMapUnmarshal(b *testing.B) {
+	in := marshalled()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		var out ppb.IntMaps
+		if err := proto.Unmarshal(in, &out); err != nil {
+			b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+// _MessageSet_Item is one entry of a message set: a type ID plus the
+// already-encoded bytes of the message of that type.
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+// find returns the set's item whose type ID matches pb's, or nil if pb has
+// no message type ID or no matching item exists. Linear scan over the items.
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+// Has reports whether the set contains an item for pb's message type.
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+// Unmarshal decodes the stored encoding for pb's message type into pb.
+// It is a no-op (nil error) when the set has no item for that type, and
+// returns errNoMessageTypeID when pb has no message type ID at all.
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+// Marshal encodes pb and stores the bytes in the set, overwriting any
+// existing item with the same message type ID or appending a new item.
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	// Take the address of a local copy so the item owns its TypeId.
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+// skipVarint returns buf with its leading varint removed.
+// It assumes buf starts with a complete varint; a truncated varint would
+// index past the end of the slice and panic.
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	// Fold each item into the extension map. Items sharing a type ID have
+	// their payloads concatenated under a single re-computed length prefix.
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go
new file mode 100644
index 0000000..1bd11aa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go
@@ -0,0 +1,88 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ . "github.com/golang/protobuf/proto/test_proto"
+)
+
+// TestUnmarshalMessageSetWithDuplicate verifies that unmarshaling a message
+// set containing two items with the same type ID merges their payloads into
+// a single item, then checks the round-tripped encoding.
+func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
+	/*
+		Message{
+			Tag{1, StartGroup},
+			Message{
+				Tag{2, Varint}, Uvarint(12345),
+				Tag{3, Bytes}, Bytes("hoo"),
+			},
+			Tag{1, EndGroup},
+			Tag{1, StartGroup},
+			Message{
+				Tag{2, Varint}, Uvarint(12345),
+				Tag{3, Bytes}, Bytes("hah"),
+			},
+			Tag{1, EndGroup},
+		}
+	*/
+	var in []byte
+	fmt.Sscanf("0b10b9601a03686f6f0c0b10b9601a036861680c", "%x", &in)
+
+	/*
+		Message{
+			Tag{1, StartGroup},
+			Message{
+				Tag{2, Varint}, Uvarint(12345),
+				Tag{3, Bytes}, Bytes("hoohah"),
+			},
+			Tag{1, EndGroup},
+		}
+	*/
+	var want []byte
+	fmt.Sscanf("0b10b9601a06686f6f6861680c", "%x", &want)
+
+	var m MyMessageSet
+	if err := proto.Unmarshal(in, &m); err != nil {
+		t.Fatalf("unexpected Unmarshal error: %v", err)
+	}
+	got, err := proto.Marshal(&m)
+	if err != nil {
+		t.Fatalf("unexpected Marshal error: %v", err)
+	}
+
+	if !bytes.Equal(got, want) {
+		t.Errorf("output mismatch:\ngot  %x\nwant %x", got, want)
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..94fa919
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+// (Comparing a slice against nil is permitted even though slices are
+// otherwise not comparable.)
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+// isptr is unused in this reflect-based implementation; it exists to mirror
+// the signature of the unsafe implementation.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+// isNil reports whether the wrapped pointer is nil.
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		// Spare capacity: just extend the length over the existing element.
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+// Typed accessors: each type-asserts the concrete pointer out of the
+// wrapped reflect.Value.
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	// Convert handles named enum types whose underlying type is int32.
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+
+// appendInt32Slice appends v to the []int32 (or []enum) that p points to.
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+// Typed accessors for the remaining scalar kinds, plus pointer get/set/append.
+// Each method type-asserts the concrete pointer out of the reflect.Value.
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		// A nil input resets the destination slice to its zero value.
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+// asPointerTo returns the wrapped reflect.Value, assumed to be of type *t.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+// The atomic*Info helpers mirror the atomic pointer load/store operations
+// used by the unsafe implementation. Here a single global mutex serializes
+// all of them instead — presumably because atomic pointer operations would
+// require unsafe.Pointer, which this build avoids. NOTE(review): confirm
+// against pointer_unsafe.go.
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+// atomicLock guards all of the atomic*Info helpers above.
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..dbfffe0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+// (All bits set — cannot collide with a real struct offset.)
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+// NOTE(review): this depends on the runtime laying out interface values as
+// [type word, data word]; it reads the data word directly.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+// isptr selects between taking the address of the data word (direct,
+// pointer-shaped interfaces) and reading the data word itself; deref
+// additionally follows the resulting pointer one level.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+ }
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
+ }
+ return p
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid, however calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+// isNil reports whether p is the nil pointer.
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+// The to* accessors below reinterpret the raw pointer p as a pointer to a
+// concrete Go type. They are plain unsafe casts: the caller must know the
+// true type of the pointed-to memory.
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // v escapes here: its address is stored through p.
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+// Remaining scalar, string, bytes, and extension accessors — each is a bare
+// unsafe reinterpretation of p; no validation is (or can be) performed.
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+// Like toPointer, this reads the data word of a [type, data] interface pair.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+// Lock-free atomic load/store of the various *Info pointers, implemented by
+// punning **T to *unsafe.Pointer for sync/atomic. These mirror the
+// mutex-based fallbacks in the reflect-based implementation.
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..79668ff
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,545 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// debug enables per-field print statements in getPropertiesLocked.
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+// get returns the field index recorded for tag t and whether one is present.
+// Small tags (< tagMapFastLimit) use a dense slice where -1 marks "absent";
+// everything else falls back to the map.
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+// put records field index fi for tag t, growing the fast slice with -1
+// sentinels as needed, or lazily allocating the slow map for large tags.
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+// The field order matters: Parse expects wire type first, tag number second,
+// and (because commas are not escaped) def= must come last.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+// Malformed input (too few fields, unknown wire type, non-numeric tag) is
+// reported to stderr or silently ignored rather than returned as an error,
+// since tags come from generated code and are assumed well-formed.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ // A non-numeric tag number aborts parsing silently, leaving p.Tag zero.
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+// Remaining options; unrecognized entries are skipped without complaint.
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ }
+ }
+}
+
+// protoMessageType is the proto.Message interface type.
+// NOTE(review): not referenced in this chunk; presumably used elsewhere in
+// the package — verify before removing.
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ switch t1 := typ; t1.Kind() {
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+
+ case reflect.Slice:
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
+ }
+
+ case reflect.Map:
+ p.mtype = t1
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // lockGetProp distinguishes external calls (must take propertiesMu) from
+ // calls already under the lock inside getPropertiesLocked.
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+// init is the lock-aware worker behind Init; lockGetProp is false only when
+// the caller already holds propertiesMu.
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// Hooks emitted by different generations of protoc-gen-go for describing
+// oneof wrapper types; getPropertiesLocked accepts either form.
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ return prop
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ // Collect oneof wrapper types via whichever generated-code hook the
+ // message provides (older XXX_OneofFuncs or newer XXX_OneofWrappers).
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
+ }
+ if len(oots) > 0 {
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+// Note the asymmetry: duplicate enums panic, while duplicate message types
+// below only log (see the TODO in RegisterType).
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or a nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+// These maps are written only at init time by generated code and are not
+// guarded by a lock.
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 0000000..73eed6c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,151 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ tpb "github.com/golang/protobuf/proto/test_proto"
+)
+
+// TestProto3ZeroValues verifies that proto3 messages containing only zero
+// values (including an empty-but-non-nil bytes field) marshal to nothing.
+func TestProto3ZeroValues(t *testing.T) {
+ tests := []struct {
+ desc string
+ m proto.Message
+ }{
+ {"zero message", &pb.Message{}},
+ {"empty bytes field", &pb.Message{Data: []byte{}}},
+ }
+ for _, test := range tests {
+ b, err := proto.Marshal(test.m)
+ if err != nil {
+ t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+ continue
+ }
+ if len(b) > 0 {
+ t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+ }
+ }
+}
+
+// TestRoundTripProto3 checks Marshal/Unmarshal symmetry for a message
+// exercising most proto3 scalar kinds plus a nested message; the inline
+// comments record the expected wire bytes for each field.
+func TestRoundTripProto3(t *testing.T) {
+ m := &pb.Message{
+ Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
+ Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
+ HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
+ Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
+ ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
+ TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
+ Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
+
+ Key: []uint64{1, 0xdeadbeef},
+ Nested: &pb.Nested{
+ Bunny: "Monty",
+ },
+ }
+ t.Logf(" m: %v", m)
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ t.Logf(" b: %q", b)
+
+ m2 := new(pb.Message)
+ if err := proto.Unmarshal(b, m2); err != nil {
+ t.Fatalf("proto.Unmarshal: %v", err)
+ }
+ t.Logf("m2: %v", m2)
+
+ if !proto.Equal(m, m2) {
+ t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+ }
+}
+
+// TestGettersForBasicTypesExist confirms generated getters are nil-safe:
+// calling them through a nil Nested returns zero values, not a panic.
+func TestGettersForBasicTypesExist(t *testing.T) {
+ var m pb.Message
+ if got := m.GetNested().GetBunny(); got != "" {
+ t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
+ }
+ if got := m.GetNested().GetCute(); got {
+ t.Errorf("m.GetNested().GetCute() = %t, want false", got)
+ }
+}
+
+// TestProto3SetDefaults checks that SetDefaults leaves proto3 fields at their
+// zero values but still fills defaults inside embedded proto2 submessages,
+// including those reachable through map values.
+func TestProto3SetDefaults(t *testing.T) {
+ in := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: new(tpb.SubDefaults),
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": new(tpb.SubDefaults),
+ },
+ }
+
+ got := proto.Clone(in).(*pb.Message)
+ proto.SetDefaults(got)
+
+ // There are no defaults in proto3. Everything should be the zero value, but
+ // we need to remember to set defaults for nested proto2 messages.
+ want := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+ },
+ }
+
+ if !proto.Equal(got, want) {
+ t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+ }
+}
+
+// TestUnknownFieldPreservation checks that bytes for an unrecognized tag
+// (200) survive Unmarshal verbatim in XXX_unrecognized.
+func TestUnknownFieldPreservation(t *testing.T) {
+ b1 := "\x0a\x05David" // Known tag 1
+ b2 := "\xc2\x0c\x06Google" // Unknown tag 200
+ b := []byte(b1 + b2)
+
+ m := new(pb.Message)
+ if err := proto.Unmarshal(b, m); err != nil {
+ t.Fatalf("proto.Unmarshal: %v", err)
+ }
+
+ if !bytes.Equal(m.XXX_unrecognized, []byte(b2)) {
+ t.Fatalf("mismatching unknown fields:\ngot %q\nwant %q", m.XXX_unrecognized, b2)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 0000000..0b8eb85
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,64 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "math"
+ "testing"
+)
+
+// This is a separate file and package from size_test.go because that one uses
+// generated messages and thus may not be in package proto without having a circular
+// dependency, whereas this file tests unexported details of size.go.
+
+func TestVarintSize(t *testing.T) {
+ // Check the edge cases carefully.
+ testCases := []struct {
+ n uint64
+ size int
+ }{
+ {0, 1},
+ {1, 1},
+ {127, 1},
+ {128, 2},
+ {16383, 2},
+ {16384, 3},
+ {math.MaxInt64, 9},
+ {math.MaxInt64 + 1, 10},
+ }
+ for _, tc := range testCases {
+ size := SizeVarint(tc.n)
+ if size != tc.size {
+ t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go
new file mode 100644
index 0000000..3abac41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/size_test.go
@@ -0,0 +1,191 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "log"
+ "strings"
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/test_proto"
+)
+
+var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
+
+// messageWithExtension2 is in equal_test.go.
+var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
+
+func init() {
+ if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
+ log.Panicf("SetExtension: %v", err)
+ }
+ if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
+ log.Panicf("SetExtension: %v", err)
+ }
+
+ // Force messageWithExtension3 to have the extension encoded.
+ Marshal(messageWithExtension3)
+
+}
+
+// non-pointer custom message
+type nonptrMessage struct{}
+
+func (m nonptrMessage) ProtoMessage() {}
+func (m nonptrMessage) Reset() {}
+func (m nonptrMessage) String() string { return "" }
+
+func (m nonptrMessage) Marshal() ([]byte, error) {
+ return []byte{42}, nil
+}
+
+// custom message embedding a proto.Message
+type messageWithEmbedding struct {
+ *pb.OtherMessage
+}
+
+func (m *messageWithEmbedding) ProtoMessage() {}
+func (m *messageWithEmbedding) Reset() {}
+func (m *messageWithEmbedding) String() string { return "" }
+
+func (m *messageWithEmbedding) Marshal() ([]byte, error) {
+ return []byte{42}, nil
+}
+
+var SizeTests = []struct {
+ desc string
+ pb Message
+}{
+ {"empty", &pb.OtherMessage{}},
+ // Basic types.
+ {"bool", &pb.Defaults{F_Bool: Bool(true)}},
+ {"int32", &pb.Defaults{F_Int32: Int32(12)}},
+ {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
+ {"small int64", &pb.Defaults{F_Int64: Int64(1)}},
+ {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
+ {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
+ {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
+ {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
+ {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
+ {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
+ {"float", &pb.Defaults{F_Float: Float32(12.6)}},
+ {"double", &pb.Defaults{F_Double: Float64(13.9)}},
+ {"string", &pb.Defaults{F_String: String("niles")}},
+ {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
+ {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
+ {"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
+ {"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
+ {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
+ // Repeated.
+ {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
+ {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
+ {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
+ {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
+ {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
+ {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
+ // Need enough large numbers to verify that the header is counting the number of bytes
+ // for the field, not the number of elements.
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+ }}},
+ {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
+ {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
+ // Nested.
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
+ {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
+ // Other things.
+ {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
+ {"extension (unencoded)", messageWithExtension1},
+ {"extension (encoded)", messageWithExtension3},
+ // proto3 message
+ {"proto3 empty", &proto3pb.Message{}},
+ {"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
+ {"proto3 int64", &proto3pb.Message{ResultCount: 1}},
+ {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
+ {"proto3 float", &proto3pb.Message{Score: 12.6}},
+ {"proto3 string", &proto3pb.Message{Name: "Snezana"}},
+ {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
+ {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
+ {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+ {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
+
+ {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
+ {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
+ {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
+ {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
+
+ {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
+ {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
+ {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
+
+ {"oneof not set", &pb.Oneof{}},
+ {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},
+ {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},
+ {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},
+ {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},
+ {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},
+ {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},
+ {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},
+ {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},
+ {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},
+ {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},
+ {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}},
+ {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}},
+ {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},
+ {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},
+ {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},
+ {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}},
+ {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}},
+ {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},
+ {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},
+ {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},
+
+ {"non-pointer message", nonptrMessage{}},
+ {"custom message with embedding", &messageWithEmbedding{&pb.OtherMessage{}}},
+}
+
+func TestSize(t *testing.T) {
+ for _, tc := range SizeTests {
+ size := Size(tc.pb)
+ b, err := Marshal(tc.pb)
+ if err != nil {
+ t.Errorf("%v: Marshal failed: %v", tc.desc, err)
+ continue
+ }
+ if size != len(b) {
+ t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
+ t.Logf("%v: bytes: %#v", tc.desc, b)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..5cb11fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// fall back to compute the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+ // The old marshaler encodes extensions at beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse protobuf tag of the field.
+ // tag has format of "bytes,49,opt,name=foo,def=hello!"
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // Oneof bytes field may also have "proto3" tag.
+ // We want to marshal it as a oneof field. Do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+ // have non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+ // since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..acee2fc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		// NOTE(review): concurrent callers may race to this store; presumably
+		// both compute equivalent info so the duplicate store is benign.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1< 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
+ }
+
+ // Get extension ranges, if any.
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+// Map fields get a dedicated unmarshaler (built from the field's key/value
+// tags); every other field is dispatched on its type and "protobuf" tag.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+// The tag string is the comma-separated "protobuf" struct tag: element 0 is the
+// wire encoding ("varint", "fixed32", "fixed64", "zigzag32", "zigzag64",
+// "bytes", "group"), and elements 3+ carry options such as "name=..." and
+// "proto3". The function panics (programmer/codegen error, not input error)
+// when no unmarshaler exists for the combination.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	// UTF-8 validation of string fields is only performed for proto3 fields.
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both).
+	// Note the Uint8 check: a []byte field is a scalar "bytes" value, not a
+	// repeated field, so it must not set slice.
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		// Maps are handled by makeUnmarshalMap via fieldUnmarshaler; reaching
+		// here means the dispatch above was bypassed incorrectly.
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		// Only []byte / [][]byte reach here (repeated non-byte slices were
+		// unwrapped above).
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+// unmarshalInt64Value decodes one varint-encoded int64 into the field at f
+// and returns the remaining bytes.
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+// unmarshalInt64Ptr decodes one varint-encoded int64 and stores a pointer to
+// a freshly allocated value in the *int64 field at f.
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+// unmarshalInt64Slice appends varint-encoded int64s to the []int64 field at f.
+// It accepts both the packed form (WireBytes: a length-prefixed run of
+// varints) and the unpacked form (one WireVarint element per call).
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// res is the data after the packed payload; b is narrowed to the payload.
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+// unmarshalSint64Value decodes one zigzag-varint-encoded int64 into the field at f.
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	// Zigzag decode: the low bit carries the sign; equivalent to (x>>1) ^ -(x&1).
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
+
+// unmarshalSint64Ptr decodes one zigzag-varint-encoded int64 and stores a
+// pointer to a freshly allocated value in the *int64 field at f.
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	// Zigzag decode: the low bit carries the sign; equivalent to (x>>1) ^ -(x&1).
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+// unmarshalSint64Slice appends zigzag-varint-encoded int64s to the []int64
+// field at f, accepting both packed (WireBytes) and unpacked (WireVarint) forms.
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			// Zigzag decode; see unmarshalSint64Value.
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+// unmarshalUint64Value decodes one varint-encoded uint64 into the field at f.
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+// unmarshalUint64Ptr decodes one varint-encoded uint64 and stores a pointer
+// to a freshly allocated value in the *uint64 field at f.
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+// unmarshalUint64Slice appends varint-encoded uint64s to the []uint64 field
+// at f, accepting both packed (WireBytes) and unpacked (WireVarint) forms.
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+// unmarshalInt32Value decodes one varint-encoded int32 (or enum) into the field at f.
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+// unmarshalInt32Ptr decodes one varint-encoded int32 and stores it through
+// setInt32Ptr (int32 pointers need a helper; presumably to bridge the unsafe
+// and reflect-based pointer implementations — see pointer_reflect.go).
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+// unmarshalInt32Slice appends varint-encoded int32s to the []int32 field at f,
+// accepting both packed (WireBytes) and unpacked (WireVarint) forms. Appends
+// go through appendInt32Slice rather than a direct slice pointer.
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+// unmarshalSint32Value decodes one zigzag-varint-encoded int32 into the field at f.
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	// Zigzag decode: the low bit carries the sign; equivalent to (x>>1) ^ -(x&1).
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+// unmarshalSint32Ptr decodes one zigzag-varint-encoded int32 and stores it
+// through setInt32Ptr.
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	// Zigzag decode; see unmarshalSint32Value.
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+// unmarshalSint32Slice appends zigzag-varint-encoded int32s to the []int32
+// field at f, accepting both packed (WireBytes) and unpacked (WireVarint) forms.
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			// Zigzag decode; see unmarshalSint32Value.
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+// unmarshalUint32Value decodes one varint-encoded uint32 into the field at f.
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+// unmarshalUint32Ptr decodes one varint-encoded uint32 and stores a pointer
+// to a freshly allocated value in the *uint32 field at f.
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+// unmarshalUint32Slice appends varint-encoded uint32s to the []uint32 field
+// at f, accepting both packed (WireBytes) and unpacked (WireVarint) forms.
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+// unmarshalFixed64Value decodes one 8-byte little-endian fixed64 into the
+// uint64 field at f.
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// Assemble the 8 bytes little-endian.
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
+// unmarshalFixed64Ptr decodes one 8-byte little-endian fixed64 and stores a
+// pointer to a freshly allocated value in the *uint64 field at f.
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// Assemble the 8 bytes little-endian.
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+// unmarshalFixed64Slice appends 8-byte little-endian fixed64s to the []uint64
+// field at f, accepting both packed (WireBytes) and unpacked (WireFixed64) forms.
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+// unmarshalFixed32Value decodes a proto fixed32 field (4 bytes,
+// little-endian) into the uint32 at f.
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+// unmarshalFixed32Ptr decodes a proto fixed32 field into the *uint32 at f.
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+// unmarshalFixed32Slice decodes a repeated fixed32 field, accepting both
+// packed (WireBytes) and unpacked (WireFixed32) encodings.
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:] // bytes remaining after the packed payload
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+// unmarshalFixedS32Value decodes a proto sfixed32 field (4 bytes,
+// little-endian, signed) into the int32 at f.
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+// unmarshalFixedS32Ptr decodes a proto sfixed32 field into the *int32 at f.
+// Note: int32 pointers use the setInt32Ptr accessor rather than a direct
+// pointer store (the pointer form differs across generated-code variants).
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+// unmarshalFixedS32Slice decodes a repeated sfixed32 field, accepting both
+// packed (WireBytes) and unpacked (WireFixed32) encodings.
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:] // bytes remaining after the packed payload
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+// unmarshalBoolValue decodes a proto bool field (varint; nonzero = true)
+// into the bool at f.
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+// unmarshalBoolPtr decodes a proto bool field into the *bool at f.
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+// unmarshalBoolSlice decodes a repeated bool field, accepting both packed
+// (WireBytes) and unpacked (WireVarint) encodings.
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:] // bytes remaining after the packed payload
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+// unmarshalFloat64Value decodes a proto double field (8 bytes, little-endian
+// IEEE 754 bit pattern) into the float64 at f.
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+// unmarshalFloat64Ptr decodes a proto double field into the *float64 at f.
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+// unmarshalFloat64Slice decodes a repeated double field, accepting both
+// packed (WireBytes) and unpacked (WireFixed64) encodings.
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:] // bytes remaining after the packed payload
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+// unmarshalFloat32Value decodes a proto float field (4 bytes, little-endian
+// IEEE 754 bit pattern) into the float32 at f.
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+// unmarshalFloat32Ptr decodes a proto float field into the *float32 at f.
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+// unmarshalFloat32Slice decodes a repeated float field, accepting both
+// packed (WireBytes) and unpacked (WireFixed32) encodings.
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:] // bytes remaining after the packed payload
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+// unmarshalStringValue decodes a length-delimited string field into the
+// string at f. No UTF-8 validation is performed (proto2 semantics).
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+// unmarshalStringPtr decodes a length-delimited string field into the
+// *string at f.
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+// unmarshalStringSlice decodes one element of a repeated string field,
+// appending to the slice at f. (Strings are never packed.)
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+// unmarshalUTF8StringValue is unmarshalStringValue plus UTF-8 validation
+// (proto3 semantics). The value is stored even when invalid; the
+// errInvalidUTF8 error is reported as non-fatal alongside the consumed
+// bytes so decoding can continue.
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+// unmarshalUTF8StringPtr is unmarshalStringPtr plus UTF-8 validation.
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+// unmarshalUTF8StringSlice is unmarshalStringSlice plus UTF-8 validation.
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+// emptyBuf is a zero-length array used as a non-nil base for append,
+// so decoded empty bytes fields come out non-nil.
+var emptyBuf [0]byte
+
+// unmarshalBytesValue decodes a length-delimited bytes field into the
+// []byte at f, copying the data out of the input buffer.
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
+
+// unmarshalBytesSlice decodes one element of a repeated bytes field,
+// appending a copy of the data to the [][]byte at f.
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // See unmarshalBytesValue for why append-to-emptyBuf is used.
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+// makeUnmarshalMessagePtr returns an unmarshaler for a singular embedded
+// message field. sub describes the submessage type; name is used to
+// qualify RequiredNotSetError field paths.
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ // RequiredNotSetError is non-fatal: return the remaining bytes
+ // along with it so the caller can keep decoding.
+ return b[x:], err
+ }
+}
+
+// makeUnmarshalMessageSlicePtr returns an unmarshaler for a repeated
+// embedded message field; each occurrence appends a new element.
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+// makeUnmarshalGroupPtr returns an unmarshaler for a singular group field
+// (delimited by start/end-group tags rather than a length prefix).
+// Like messages, repeated occurrences of a group are merged.
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ // x is the start of the end-group tag, y is just past it.
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+// makeUnmarshalGroupSlicePtr returns an unmarshaler for a repeated group
+// field; each occurrence appends a new element.
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+// makeUnmarshalMap returns an unmarshaler for a map field. Each map entry
+// is encoded as a submessage with field 1 = key and field 2 = value; the
+// decoded pair is inserted into the (lazily allocated) map at f.
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ // Missing key or value yields that type's zero value.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ // Non-fatal errors (e.g. invalid UTF-8) are collected and
+ // reported at the end; decoding continues.
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// double Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ // Last case seen wins, per oneof semantics.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+// The tag itself has already been consumed by the caller.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ // len(b)-k is computed before comparing so m (an attacker-
+ // controlled length) cannot cause an out-of-range slice.
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ // Skip the whole group, including its end-group tag.
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ // Read the next field tag; x&7 is its wire type.
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i // start of this tag, returned if it is the matching EndGroup
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ // Written as len(b)-4 < i (not i+4 > len(b)) to avoid overflow.
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+// Each byte carries 7 payload bits, low-order group first; the high bit
+// marks continuation.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+//
+// The loop is hand-unrolled: each stage adds the next byte's full value
+// shifted into place, and the following stage subtracts the continuation
+// bit (0x80<<shift) added by this one. A varint is at most 10 bytes; the
+// 10th byte may only be 0 or 1 (the top bit of a uint64), anything larger
+// is rejected as overflow.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) == 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ // 10th byte must be 0 or 1; larger values overflow 64 bits.
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+// Byte sequences reused by the text serializer to avoid per-write
+// allocations.
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// writer is the minimal byte sink required by textWriter.
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int // current indentation depth
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+// WriteString writes s, indenting at the start of a fresh line. Strings
+// containing newlines fall through to Write, which handles per-line
+// indentation.
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+// Write writes p, indenting each fresh line. In compact mode newlines are
+// replaced by single spaces. Returns the number of bytes written to the
+// underlying writer (including separators) and the first error, if any.
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ // Fast path: no newlines, write the fragment directly.
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ // Join fragments with spaces instead of newlines.
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ // The line is complete iff p ended with a newline (last fragment empty).
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+// WriteByte writes c, translating '\n' to ' ' in compact mode and
+// indenting at the start of a fresh line.
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+// indent increases the indentation level by one.
+func (w *textWriter) indent() { w.ind++ }
+
+// unindent decreases the indentation level by one, logging (rather than
+// panicking) if it would go negative.
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+// writeName writes a field's text-format name. Groups are written without
+// a trailing colon; all other fields get "name:".
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+ // When type URL contains any characters except [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message, detected via
+// the generated XXX_WellKnownType method on the addressable value.
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ // Resolve the message type from the last path element of the type URL.
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ // Adjust ind directly: the "<\n" above was already written, so
+ // indent()'s line bookkeeping is not needed here.
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+// writeStruct writes the text form of the message struct sv, one field per
+// line (or space-separated in compact mode): scalars, repeated fields,
+// maps (as repeated key/value entries), oneofs, unknown bytes, and
+// extensions.
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ // canExpand == false: type not registered; fall through and
+ // print the Any's raw fields instead.
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys)) // deterministic output order
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go
new file mode 100644
index 0000000..a819808
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go
@@ -0,0 +1,706 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "fmt"
+ "math"
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ . "github.com/golang/protobuf/proto/test_proto"
+)
+
+type UnmarshalTextTest struct {
+ in string
+ err string // if "", no error expected
+ out *MyMessage
+}
+
+func buildExtStructTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_More, &Ext{
+ Data: String("Hello, world!"),
+ })
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtDataTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_Text, String("Hello, world!"))
+ SetExtension(msg, E_Ext_Number, Int32(1729))
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtRepStringTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
+ panic(err)
+ }
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+var unMarshalTextTests = []UnmarshalTextTest{
+ // Basic
+ {
+ in: " count:42\n name:\"Dave\" ",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ },
+ },
+
+ // Empty quoted string
+ {
+ in: `count:42 name:""`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(""),
+ },
+ },
+
+ // Quoted string concatenation with double quotes
+ {
+ in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string concatenation with single quotes
+ {
+ in: "count:42 name: 'My name is '\n'elsewhere'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string concatenations with mixed quotes
+ {
+ in: "count:42 name: 'My name is '\n\"elsewhere\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+ {
+ in: "count:42 name: \"My name is \"\n'elsewhere'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string with escaped apostrophe
+ {
+ in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("HOLIDAY - New Year's Day"),
+ },
+ },
+
+ // Quoted string with single quote
+ {
+ in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`Roger "The Ramster" Ramjet`),
+ },
+ },
+
+ // Quoted string with all the accepted special characters from the C++ test
+ {
+ in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
+ },
+ },
+
+ // Quoted string with quoted backslash
+ {
+ in: `count:42 name: "\\'xyz"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`\'xyz`),
+ },
+ },
+
+ // Quoted string with UTF-8 bytes.
+ {
+ in: "count:42 name: '\303\277\302\201\x00\xAB\xCD\xEF'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\303\277\302\201\x00\xAB\xCD\xEF"),
+ },
+ },
+
+ // Quoted string with unicode escapes.
+ {
+ in: `count: 42 name: "\u0047\U00000047\uffff\U0010ffff"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("GG\uffff\U0010ffff"),
+ },
+ },
+
+ // Bad quoted string
+ {
+ in: `inner: < host: "\0" >` + "\n",
+ err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
+ },
+
+ // Bad \u escape
+ {
+ in: `count: 42 name: "\u000"`,
+ err: `line 1.16: invalid quoted string "\u000": \u requires 4 following digits`,
+ },
+
+ // Bad \U escape
+ {
+ in: `count: 42 name: "\U0000000"`,
+ err: `line 1.16: invalid quoted string "\U0000000": \U requires 8 following digits`,
+ },
+
+ // Bad \U escape
+ {
+ in: `count: 42 name: "\xxx"`,
+ err: `line 1.16: invalid quoted string "\xxx": \xxx contains non-hexadecimal digits`,
+ },
+
+ // Number too large for int64
+ {
+ in: "count: 1 others { key: 123456789012345678901 }",
+ err: "line 1.23: invalid int64: 123456789012345678901",
+ },
+
+ // Number too large for int32
+ {
+ in: "count: 1234567890123",
+ err: "line 1.7: invalid int32: 1234567890123",
+ },
+
+ // Number in hexadecimal
+ {
+ in: "count: 0x2beef",
+ out: &MyMessage{
+ Count: Int32(0x2beef),
+ },
+ },
+
+ // Number in octal
+ {
+ in: "count: 024601",
+ out: &MyMessage{
+ Count: Int32(024601),
+ },
+ },
+
+ // Floating point number with "f" suffix
+ {
+ in: "count: 4 others:< weight: 17.0f >",
+ out: &MyMessage{
+ Count: Int32(4),
+ Others: []*OtherMessage{
+ {
+ Weight: Float32(17),
+ },
+ },
+ },
+ },
+
+ // Floating point positive infinity
+ {
+ in: "count: 4 bigfloat: inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(1)),
+ },
+ },
+
+ // Floating point negative infinity
+ {
+ in: "count: 4 bigfloat: -inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(-1)),
+ },
+ },
+
+ // Number too large for float32
+ {
+ in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
+ err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
+ },
+
+ // Number posing as a quoted string
+ {
+ in: `inner: < host: 12 >` + "\n",
+ err: `line 1.15: invalid string: 12`,
+ },
+
+ // Quoted string posing as int32
+ {
+ in: `count: "12"`,
+ err: `line 1.7: invalid int32: "12"`,
+ },
+
+ // Quoted string posing a float32
+ {
+ in: `others:< weight: "17.4" >`,
+ err: `line 1.17: invalid float32: "17.4"`,
+ },
+
+ // unclosed bracket doesn't cause infinite loop
+ {
+ in: `[`,
+ err: `line 1.0: unclosed type_url or extension name`,
+ },
+
+ // Enum
+ {
+ in: `count:42 bikeshed: BLUE`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Bikeshed: MyMessage_BLUE.Enum(),
+ },
+ },
+
+ // Repeated field
+ {
+ in: `count:42 pet: "horsey" pet:"bunny"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated field with list notation
+ {
+ in: `count:42 pet: ["horsey", "bunny"]`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated message with/without colon and <>/{}
+ {
+ in: `count:42 others:{} others{} others:<> others:{}`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Others: []*OtherMessage{
+ {},
+ {},
+ {},
+ {},
+ },
+ },
+ },
+
+ // Missing colon for inner message
+ {
+ in: `count:42 inner < host: "cauchy.syd" >`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("cauchy.syd"),
+ },
+ },
+ },
+
+ // Missing colon for string field
+ {
+ in: `name "Dave"`,
+ err: `line 1.5: expected ':', found "\"Dave\""`,
+ },
+
+ // Missing colon for int32 field
+ {
+ in: `count 42`,
+ err: `line 1.6: expected ':', found "42"`,
+ },
+
+ // Missing required field
+ {
+ in: `name: "Pawel"`,
+ err: fmt.Sprintf(`proto: required field "%T.count" not set`, MyMessage{}),
+ out: &MyMessage{
+ Name: String("Pawel"),
+ },
+ },
+
+ // Missing required field in a required submessage
+ {
+ in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
+ err: fmt.Sprintf(`proto: required field "%T.host" not set`, InnerMessage{}),
+ out: &MyMessage{
+ Count: Int32(42),
+ WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
+ },
+ },
+
+ // Repeated non-repeated field
+ {
+ in: `name: "Rob" name: "Russ"`,
+ err: `line 1.12: non-repeated field "name" was repeated`,
+ },
+
+ // Group
+ {
+ in: `count: 17 SomeGroup { group_field: 12 }`,
+ out: &MyMessage{
+ Count: Int32(17),
+ Somegroup: &MyMessage_SomeGroup{
+ GroupField: Int32(12),
+ },
+ },
+ },
+
+ // Semicolon between fields
+ {
+ in: `count:3;name:"Calvin"`,
+ out: &MyMessage{
+ Count: Int32(3),
+ Name: String("Calvin"),
+ },
+ },
+ // Comma between fields
+ {
+ in: `count:4,name:"Ezekiel"`,
+ out: &MyMessage{
+ Count: Int32(4),
+ Name: String("Ezekiel"),
+ },
+ },
+
+ // Boolean false
+ {
+ in: `count:42 inner { host: "example.com" connected: false }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean true
+ {
+ in: `count:42 inner { host: "example.com" connected: true }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean 0
+ {
+ in: `count:42 inner { host: "example.com" connected: 0 }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean 1
+ {
+ in: `count:42 inner { host: "example.com" connected: 1 }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean f
+ {
+ in: `count:42 inner { host: "example.com" connected: f }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean t
+ {
+ in: `count:42 inner { host: "example.com" connected: t }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean False
+ {
+ in: `count:42 inner { host: "example.com" connected: False }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean True
+ {
+ in: `count:42 inner { host: "example.com" connected: True }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+
+ // Extension
+ buildExtStructTest(`count: 42 [test_proto.Ext.more]:`),
+ buildExtStructTest(`count: 42 [test_proto.Ext.more] {data:"Hello, world!"}`),
+ buildExtDataTest(`count: 42 [test_proto.Ext.text]:"Hello, world!" [test_proto.Ext.number]:1729`),
+ buildExtRepStringTest(`count: 42 [test_proto.greeting]:"bula" [test_proto.greeting]:"hola"`),
+
+ // Big all-in-one
+ {
+ in: "count:42 # Meaning\n" +
+ `name:"Dave" ` +
+ `quote:"\"I didn't want to go.\"" ` +
+ `pet:"bunny" ` +
+ `pet:"kitty" ` +
+ `pet:"horsey" ` +
+ `inner:<` +
+ ` host:"footrest.syd" ` +
+ ` port:7001 ` +
+ ` connected:true ` +
+ `> ` +
+ `others:<` +
+ ` key:3735928559 ` +
+ ` value:"\x01A\a\f" ` +
+ `> ` +
+ `others:<` +
+ " weight:58.9 # Atomic weight of Co\n" +
+ ` inner:<` +
+ ` host:"lesha.mtv" ` +
+ ` port:8002 ` +
+ ` >` +
+ `>`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ Quote: String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &InnerMessage{
+ Host: String("footrest.syd"),
+ Port: Int32(7001),
+ Connected: Bool(true),
+ },
+ Others: []*OtherMessage{
+ {
+ Key: Int64(3735928559),
+ Value: []byte{0x1, 'A', '\a', '\f'},
+ },
+ {
+ Weight: Float32(58.9),
+ Inner: &InnerMessage{
+ Host: String("lesha.mtv"),
+ Port: Int32(8002),
+ },
+ },
+ },
+ },
+ },
+}
+
+func TestUnmarshalText(t *testing.T) {
+ for i, test := range unMarshalTextTests {
+ pb := new(MyMessage)
+ err := UnmarshalText(test.in, pb)
+ if test.err == "" {
+ // We don't expect failure.
+ if err != nil {
+ t.Errorf("Test %d: Unexpected error: %v", i, err)
+ } else if !Equal(pb, test.out) {
+ t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ } else {
+ // We do expect failure.
+ if err == nil {
+ t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
+ } else if err.Error() != test.err {
+ t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
+ i, err.Error(), test.err)
+ } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !Equal(pb, test.out) {
+ t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ }
+ }
+}
+
+func TestUnmarshalTextCustomMessage(t *testing.T) {
+ msg := &textMessage{}
+ if err := UnmarshalText("custom", msg); err != nil {
+ t.Errorf("Unexpected error from custom unmarshal: %v", err)
+ }
+ if UnmarshalText("not custom", msg) == nil {
+ t.Errorf("Didn't get expected error from custom unmarshal")
+ }
+}
+
+// Regression test; this caused a panic.
+func TestRepeatedEnum(t *testing.T) {
+ pb := new(RepeatedEnum)
+ if err := UnmarshalText("color: RED", pb); err != nil {
+ t.Fatal(err)
+ }
+ exp := &RepeatedEnum{
+ Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+ }
+ if !Equal(pb, exp) {
+ t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
+ }
+}
+
+func TestProto3TextParsing(t *testing.T) {
+ m := new(proto3pb.Message)
+ const in = `name: "Wallace" true_scotsman: true`
+ want := &proto3pb.Message{
+ Name: "Wallace",
+ TrueScotsman: true,
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestMapParsing(t *testing.T) {
+ m := new(MessageWithMap)
+ const in = `name_mapping: name_mapping:` +
+ `msg_mapping:,>` + // separating commas are okay
+ `msg_mapping>` + // no colon after "value"
+ `msg_mapping:>` + // omitted key
+ `msg_mapping:` + // omitted value
+ `byte_mapping:` +
+ `byte_mapping:<>` // omitted key and value
+ want := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Beatles",
+ 1234: "Feist",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ -4: {F: Float64(2.0)},
+ -2: {F: Float64(4.0)},
+ 0: {F: Float64(5.0)},
+ 1: nil,
+ },
+ ByteMapping: map[bool][]byte{
+ false: nil,
+ true: []byte("so be it"),
+ },
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestOneofParsing(t *testing.T) {
+ const in = `name:"Shrek"`
+ m := new(Communique)
+ want := &Communique{Union: &Communique_Name{"Shrek"}}
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+
+ const inOverwrite = `name:"Shrek" number:42`
+ m = new(Communique)
+ testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
+ if err := UnmarshalText(inOverwrite, m); err == nil {
+ t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
+ } else if err.Error() != testErr {
+ t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
+ err.Error(), testErr)
+ }
+
+}
+
+var benchInput string
+
+func init() {
+ benchInput = "count: 4\n"
+ for i := 0; i < 1000; i++ {
+ benchInput += "pet: \"fido\"\n"
+ }
+
+ // Check it is valid input.
+ pb := new(MyMessage)
+ err := UnmarshalText(benchInput, pb)
+ if err != nil {
+ panic("Bad benchmark input: " + err.Error())
+ }
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+ pb := new(MyMessage)
+ for i := 0; i < b.N; i++ {
+ UnmarshalText(benchInput, pb)
+ }
+ b.SetBytes(int64(len(benchInput)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 0000000..3c8b033
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,518 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "math"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/test_proto"
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+// textMessage implements the methods that allow it to marshal and unmarshal
+// itself as text.
+type textMessage struct {
+}
+
+func (*textMessage) MarshalText() ([]byte, error) {
+ return []byte("custom"), nil
+}
+
+func (*textMessage) UnmarshalText(bytes []byte) error {
+ if string(bytes) != "custom" {
+ return errors.New("expected 'custom'")
+ }
+ return nil
+}
+
+func (*textMessage) Reset() {}
+func (*textMessage) String() string { return "" }
+func (*textMessage) ProtoMessage() {}
+
+func newTestMessage() *pb.MyMessage {
+ msg := &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Quote: proto.String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("footrest.syd"),
+ Port: proto.Int32(7001),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(0xdeadbeef),
+ Value: []byte{1, 65, 7, 12},
+ },
+ {
+ Weight: proto.Float32(6.022),
+ Inner: &pb.InnerMessage{
+ Host: proto.String("lesha.mtv"),
+ Port: proto.Int32(8002),
+ },
+ },
+ },
+ Bikeshed: pb.MyMessage_BLUE.Enum(),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(8),
+ },
+ // One normally wouldn't do this.
+ // This is an undeclared tag 13, as a varint (wire type 0) with value 4.
+ XXX_unrecognized: []byte{13<<3 | 0, 4},
+ }
+ ext := &pb.Ext{
+ Data: proto.String("Big gobs for big rats"),
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
+ panic(err)
+ }
+ greetings := []string{"adg", "easy", "cow"}
+ if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
+ panic(err)
+ }
+
+ // Add an unknown extension. We marshal a pb.Ext, and fake the ID.
+ b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
+ if err != nil {
+ panic(err)
+ }
+ b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
+ proto.SetRawExtension(msg, 201, b)
+
+ // Extensions can be plain fields, too, so let's test that.
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+ proto.SetRawExtension(msg, 202, b)
+
+ return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+ host: "footrest.syd"
+ port: 7001
+ connected: true
+>
+others: <
+ key: 3735928559
+ value: "\001A\007\014"
+>
+others: <
+ weight: 6.022
+ inner: <
+ host: "lesha.mtv"
+ port: 8002
+ >
+>
+bikeshed: BLUE
+SomeGroup {
+ group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[test_proto.Ext.more]: <
+ data: "Big gobs for big rats"
+>
+[test_proto.greeting]: "adg"
+[test_proto.greeting]: "easy"
+[test_proto.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != text {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+ }
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != "custom" {
+ t.Errorf("Got %q, expected %q", s, "custom")
+ }
+}
+func TestMarshalTextNil(t *testing.T) {
+ want := ""
+ tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+ for i, test := range tests {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, test); err != nil {
+ t.Fatal(err)
+ }
+ if got := buf.String(); got != want {
+ t.Errorf("%d: got %q want %q", i, got, want)
+ }
+ }
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+ // The Color enum only specifies values 0-2.
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+ got := m.String()
+ const want = `bikeshed:3 `
+ if got != want {
+ t.Errorf("\n got %q\nwant %q", got, want)
+ }
+}
+
+func TestTextOneof(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&pb.Communique{}, ``},
+ // scalar field
+ {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+ // message field
+ {&pb.Communique{Union: &pb.Communique_Msg{
+ &pb.Strings{StringField: proto.String("why hello!")},
+ }}, `msg:`},
+ // bad oneof (should not panic)
+ {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+ buf := new(bytes.Buffer)
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ proto.MarshalText(buf, m)
+ }
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+ w := ioutil.Discard
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ proto.MarshalText(w, m)
+ }
+}
+
+func compact(src string) string {
+ // s/[ \n]+/ /g; s/ $//;
+ dst := make([]byte, len(src))
+ space, comment := false, false
+ j := 0
+ for i := 0; i < len(src); i++ {
+ if strings.HasPrefix(src[i:], "/*") {
+ comment = true
+ i++
+ continue
+ }
+ if comment && strings.HasPrefix(src[i:], "*/") {
+ comment = false
+ i++
+ continue
+ }
+ if comment {
+ continue
+ }
+ c := src[i]
+ if c == ' ' || c == '\n' {
+ space = true
+ continue
+ }
+ if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+ space = false
+ }
+ if c == '{' {
+ space = false
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ space = false
+ }
+ dst[j] = c
+ j++
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ }
+ return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+ s := proto.CompactTextString(newTestMessage())
+ if s != compactText {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+ }
+}
+
+func TestStringEscaping(t *testing.T) {
+ testCases := []struct {
+ in *pb.Strings
+ out string
+ }{
+ {
+ // Test data from C++ test (TextFormatTest.StringEscape).
+ // Single divergence: we don't escape apostrophes.
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+ "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+ },
+ {
+ // Test data from the same C++ test.
+ &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+ "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+ },
+ {
+ // Some UTF-8.
+ &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+ `string_field: "\000\001\377\201"` + "\n",
+ },
+ }
+
+ for i, tc := range testCases {
+ var buf bytes.Buffer
+ if err := proto.MarshalText(&buf, tc.in); err != nil {
+ t.Errorf("proto.MarsalText: %v", err)
+ continue
+ }
+ s := buf.String()
+ if s != tc.out {
+ t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+ continue
+ }
+
+ // Check round-trip.
+ pb := new(pb.Strings)
+ if err := proto.UnmarshalText(s, pb); err != nil {
+ t.Errorf("#%d: UnmarshalText: %v", i, err)
+ continue
+ }
+ if !proto.Equal(pb, tc.in) {
+ t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+ }
+ }
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+ b bytes.Buffer
+ limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+ var avail = w.limit - w.b.Len()
+ if avail <= 0 {
+ return 0, outOfSpace
+ }
+ if len(p) <= avail {
+ return w.b.Write(p)
+ }
+ n, _ = w.b.Write(p[:avail])
+ return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+ // Try lots of different sizes to exercise more error code-paths.
+ for lim := 0; lim < len(text); lim++ {
+ buf := new(limitedWriter)
+ buf.limit = lim
+ err := proto.MarshalText(buf, newTestMessage())
+ // We expect a certain error, but also some partial results in the buffer.
+ if err != outOfSpace {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+ }
+ s := buf.b.String()
+ x := text[:buf.limit]
+ if s != x {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+ }
+ }
+}
+
+func TestFloats(t *testing.T) {
+ tests := []struct {
+ f float64
+ want string
+ }{
+ {0, "0"},
+ {4.7, "4.7"},
+ {math.Inf(1), "inf"},
+ {math.Inf(-1), "-inf"},
+ {math.NaN(), "nan"},
+ }
+ for _, test := range tests {
+ msg := &pb.FloatingPoint{F: &test.f}
+ got := strings.TrimSpace(msg.String())
+ want := `f:` + test.want
+ if got != want {
+ t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+ }
+ }
+}
+
+func TestRepeatedNilText(t *testing.T) {
+ m := &pb.MessageList{
+ Message: []*pb.MessageList_Message{
+ nil,
+ &pb.MessageList_Message{
+ Name: proto.String("Horse"),
+ },
+ nil,
+ },
+ }
+ want := `Message
+Message {
+ name: "Horse"
+}
+Message
+`
+ if s := proto.MarshalTextString(m); s != want {
+ t.Errorf(" got: %s\nwant: %s", s, want)
+ }
+}
+
+func TestProto3Text(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&proto3pb.Message{}, ``},
+ // zero message except for an empty byte slice
+ {&proto3pb.Message{Data: []byte{}}, ``},
+ // trivial case
+ {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+ // empty map
+ {&pb.MessageWithMap{}, ``},
+ // non-empty map; map format is the same as a repeated struct,
+ // and they are sorted by key (numerically for numeric keys).
+ {
+ &pb.MessageWithMap{NameMapping: map[int32]string{
+ -1: "Negatory",
+ 7: "Lucky",
+ 1234: "Feist",
+ 6345789: "Otis",
+ }},
+ `name_mapping: ` +
+ `name_mapping: ` +
+ `name_mapping: ` +
+ `name_mapping:`,
+ },
+ // map with nil value; not well-defined, but we shouldn't crash
+ {
+ &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+ `msg_mapping:`,
+ },
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
+
+func TestRacyMarshal(t *testing.T) {
+ // This test should be run with the race detector.
+
+ any := &pb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
+ proto.SetExtension(any, pb.E_Ext_Text, proto.String("bar"))
+ b, err := proto.Marshal(any)
+ if err != nil {
+ panic(err)
+ }
+ m := &proto3pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any), Value: b},
+ }
+
+ wantText := proto.MarshalTextString(m)
+ wantBytes, err := proto.Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal error: %v", err)
+ }
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+ wg.Add(20)
+ for i := 0; i < 10; i++ {
+ go func() {
+ defer wg.Done()
+ got := proto.MarshalTextString(m)
+ if got != wantText {
+ t.Errorf("proto.MarshalTextString = %q, want %q", got, wantText)
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ got, err := proto.Marshal(m)
+ if !bytes.Equal(got, wantBytes) || err != nil {
+ t.Errorf("proto.Marshal = (%x, %v), want (%x, nil)", got, err, wantBytes)
+ }
+ }()
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/regenerate.sh b/vendor/github.com/golang/protobuf/regenerate.sh
new file mode 100755
index 0000000..db0a0d6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/regenerate.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -e
+
+# Install the working tree's protoc-gen-gen in a tempdir.
+tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
+trap 'rm -rf $tmpdir' EXIT
+mkdir -p $tmpdir/bin
+PATH=$tmpdir/bin:$PATH
+GOBIN=$tmpdir/bin go install ./protoc-gen-go
+
+# Public imports require at least Go 1.9.
+supportTypeAliases=""
+if go list -f '{{context.ReleaseTags}}' runtime | grep -q go1.9; then
+ supportTypeAliases=1
+fi
+
+# Generate various test protos.
+PROTO_DIRS=(
+ jsonpb/jsonpb_test_proto
+ proto
+ protoc-gen-go/testdata
+)
+for dir in ${PROTO_DIRS[@]}; do
+ for p in `find $dir -name "*.proto"`; do
+ if [[ $p == */import_public/* && ! $supportTypeAliases ]]; then
+ echo "# $p (skipped)"
+ continue;
+ fi
+ echo "# $p"
+ protoc -I$dir --go_out=plugins=grpc,paths=source_relative:$dir $p
+ done
+done
+
+# Deriving the location of the source protos from the path to the
+# protoc binary may be a bit odd, but this is what protoc itself does.
+PROTO_INCLUDE=$(dirname $(dirname $(which protoc)))/include
+
+# Well-known types.
+WKT_PROTOS=(any duration empty struct timestamp wrappers)
+for p in ${WKT_PROTOS[@]}; do
+ echo "# google/protobuf/$p.proto"
+ protoc --go_out=paths=source_relative:$tmpdir google/protobuf/$p.proto
+ cp $tmpdir/google/protobuf/$p.pb.go ptypes/$p
+ cp $PROTO_INCLUDE/google/protobuf/$p.proto ptypes/$p
+done
+
+# descriptor.proto.
+echo "# google/protobuf/descriptor.proto"
+protoc --go_out=paths=source_relative:$tmpdir google/protobuf/descriptor.proto
+cp $tmpdir/google/protobuf/descriptor.pb.go protoc-gen-go/descriptor
+cp $PROTO_INCLUDE/google/protobuf/descriptor.proto protoc-gen-go/descriptor
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 0000000..c86bcc0
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 0000000..d98308b
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input in to tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+ shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+ l := NewLexer(os.Stdin)
+	for token, err := l.Next(); err == nil; token, err = l.Next() {
+ // process token
+ }
+
+To access the raw token stream (which includes tokens for comments):
+
+ t := NewTokenizer(os.Stdin)
+	for token, err := t.Next(); err == nil; token, err = t.Next() {
+ // process token
+ }
+
+*/
+package shlex
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexicographical token.
+type Token struct {
+ tokenType TokenType
+ value string
+}
+
+// Equal reports whether tokens a, and b, are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.tokenType != b.tokenType {
+ return false
+ }
+ return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+ spaceRunes = " \t\r\n"
+ escapingQuoteRunes = `"`
+ nonEscapingQuoteRunes = "'"
+ escapeRunes = `\`
+ commentRunes = "#"
+)
+
+// Classes of rune token
+const (
+ unknownRuneClass runeTokenClass = iota
+ spaceRuneClass
+ escapingQuoteRuneClass
+ nonEscapingQuoteRuneClass
+ escapeRuneClass
+ commentRuneClass
+ eofRuneClass
+)
+
+// Classes of lexicographic token
+const (
+ UnknownToken TokenType = iota
+ WordToken
+ SpaceToken
+ CommentToken
+)
+
+// Lexer state machine states
+const (
+ startState lexerState = iota // no runes have been seen
+ inWordState // processing regular runes in a word
+ escapingState // we have just consumed an escape rune; the next rune is literal
+ escapingQuotedState // we have just consumed an escape rune within a quoted string
+ quotingEscapingState // we are within a quoted string that supports escaping ("...")
+ quotingState // we are within a string that does not support escaping ('...')
+ commentState // we are within a comment (everything following an unquoted or unescaped #
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+ for _, runeChar := range runes {
+ typeMap[runeChar] = tokenType
+ }
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+ t := tokenClassifier{}
+ t.addRuneClass(spaceRunes, spaceRuneClass)
+ t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+ t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+ t.addRuneClass(escapeRunes, escapeRuneClass)
+ t.addRuneClass(commentRunes, commentRuneClass)
+ return t
+}
+
+// ClassifyRune classifies a rune
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+ return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+ return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+ for {
+ token, err := (*Tokenizer)(l).Next()
+ if err != nil {
+ return "", err
+ }
+ switch token.tokenType {
+ case WordToken:
+ return token.value, nil
+ case CommentToken:
+ // skip comments
+ default:
+ return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+ }
+ }
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+ input bufio.Reader
+ classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ input := bufio.NewReader(r)
+ classifier := newDefaultClassifier()
+ return &Tokenizer{
+ input: *input,
+ classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+ state := startState
+ var tokenType TokenType
+ var value []rune
+ var nextRune rune
+ var nextRuneType runeTokenClass
+ var err error
+
+ for {
+ nextRune, _, err = t.input.ReadRune()
+ nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+ if err == io.EOF {
+ nextRuneType = eofRuneClass
+ err = nil
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch state {
+ case startState: // no runes read yet
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ return nil, io.EOF
+ }
+ case spaceRuneClass:
+ {
+ }
+ case escapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ tokenType = WordToken
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ tokenType = WordToken
+ state = escapingState
+ }
+ case commentRuneClass:
+ {
+ tokenType = CommentToken
+ state = commentState
+ }
+ default:
+ {
+ tokenType = WordToken
+ value = append(value, nextRune)
+ state = inWordState
+ }
+ }
+ }
+ case inWordState: // in a regular word
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = quotingEscapingState
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = quotingState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingState: // the rune after an escape character
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = inWordState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case escapingQuotedState: // the next rune after an escape character, in double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found after escape character")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ default:
+ {
+ state = quotingEscapingState
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingEscapingState: // in escaping double quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case escapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ case escapeRuneClass:
+ {
+ state = escapingQuotedState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case quotingState: // in non-escaping single quotes
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ err = fmt.Errorf("EOF found when expecting closing quote")
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case nonEscapingQuoteRuneClass:
+ {
+ state = inWordState
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ case commentState: // in a comment
+ {
+ switch nextRuneType {
+ case eofRuneClass:
+ {
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ }
+ case spaceRuneClass:
+ {
+ if nextRune == '\n' {
+ state = startState
+ token := &Token{
+ tokenType: tokenType,
+ value: string(value)}
+ return token, err
+ } else {
+ value = append(value, nextRune)
+ }
+ }
+ default:
+ {
+ value = append(value, nextRune)
+ }
+ }
+ }
+ default:
+ {
+ return nil, fmt.Errorf("Unexpected state: %v", state)
+ }
+ }
+ }
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+ return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+ l := NewLexer(strings.NewReader(s))
+ subStrings := make([]string, 0)
+ for {
+ word, err := l.Next()
+ if err != nil {
+ if err == io.EOF {
+ return subStrings, nil
+ }
+ return subStrings, err
+ }
+ subStrings = append(subStrings, word)
+ }
+}
diff --git a/vendor/github.com/google/shlex/shlex_test.go b/vendor/github.com/google/shlex/shlex_test.go
new file mode 100644
index 0000000..f9f9e0c
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex_test.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shlex
+
+import (
+ "strings"
+ "testing"
+)
+
+var (
+ // one two "three four" "five \"six\"" seven#eight # nine # ten
+ // eleven 'twelve\'
+ testString = "one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven 'twelve\\' thirteen=13 fourteen/14"
+)
+
+func TestClassifier(t *testing.T) {
+ classifier := newDefaultClassifier()
+ tests := map[rune]runeTokenClass{
+ ' ': spaceRuneClass,
+ '"': escapingQuoteRuneClass,
+ '\'': nonEscapingQuoteRuneClass,
+ '#': commentRuneClass}
+ for runeChar, want := range tests {
+ got := classifier.ClassifyRune(runeChar)
+ if got != want {
+ t.Errorf("ClassifyRune(%v) -> %v. Want: %v", runeChar, got, want)
+ }
+ }
+}
+
+func TestTokenizer(t *testing.T) {
+ testInput := strings.NewReader(testString)
+ expectedTokens := []*Token{
+ &Token{WordToken, "one"},
+ &Token{WordToken, "two"},
+ &Token{WordToken, "three four"},
+ &Token{WordToken, "five \"six\""},
+ &Token{WordToken, "seven#eight"},
+ &Token{CommentToken, " nine # ten"},
+ &Token{WordToken, "eleven"},
+ &Token{WordToken, "twelve\\"},
+ &Token{WordToken, "thirteen=13"},
+ &Token{WordToken, "fourteen/14"}}
+
+ tokenizer := NewTokenizer(testInput)
+ for i, want := range expectedTokens {
+ got, err := tokenizer.Next()
+ if err != nil {
+ t.Error(err)
+ }
+ if !got.Equal(want) {
+ t.Errorf("Tokenizer.Next()[%v] of %q -> %v. Want: %v", i, testString, got, want)
+ }
+ }
+}
+
+func TestLexer(t *testing.T) {
+ testInput := strings.NewReader(testString)
+ expectedStrings := []string{"one", "two", "three four", "five \"six\"", "seven#eight", "eleven", "twelve\\", "thirteen=13", "fourteen/14"}
+
+ lexer := NewLexer(testInput)
+ for i, want := range expectedStrings {
+ got, err := lexer.Next()
+ if err != nil {
+ t.Error(err)
+ }
+ if got != want {
+ t.Errorf("Lexer.Next()[%v] of %q -> %v. Want: %v", i, testString, got, want)
+ }
+ }
+}
+
+func TestSplit(t *testing.T) {
+ want := []string{"one", "two", "three four", "five \"six\"", "seven#eight", "eleven", "twelve\\", "thirteen=13", "fourteen/14"}
+ got, err := Split(testString)
+ if err != nil {
+ t.Error(err)
+ }
+ if len(want) != len(got) {
+ t.Errorf("Split(%q) -> %v. Want: %v", testString, got, want)
+ }
+ for i := range got {
+ if got[i] != want[i] {
+ t.Errorf("Split(%q)[%v] -> %v. Want: %v", testString, i, got[i], want[i])
+ }
+ }
+}
diff --git a/vendor/github.com/gordonklaus/ineffassign/.gitignore b/vendor/github.com/gordonklaus/ineffassign/.gitignore
new file mode 100644
index 0000000..c4feb4f
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/.gitignore
@@ -0,0 +1,30 @@
+ineffassign
+
+# Created by https://www.gitignore.io/api/go
+
+### Go ###
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
diff --git a/vendor/github.com/gordonklaus/ineffassign/LICENSE b/vendor/github.com/gordonklaus/ineffassign/LICENSE
new file mode 100644
index 0000000..9e3d9bc
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Gordon Klaus and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/gordonklaus/ineffassign/README.md b/vendor/github.com/gordonklaus/ineffassign/README.md
new file mode 100644
index 0000000..6dcb9f0
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/README.md
@@ -0,0 +1,4 @@
+# ineffassign
+Detect ineffectual assignments in Go code.
+
+This tool misses some cases because it does not consider any type information in its analysis. (For example, assignments to struct fields are never marked as ineffectual.) It should, however, never give any false positives.
diff --git a/vendor/github.com/gordonklaus/ineffassign/bugs b/vendor/github.com/gordonklaus/ineffassign/bugs
new file mode 100644
index 0000000..468177e
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/bugs
@@ -0,0 +1,7 @@
+cmd/compile/internal/big/floatconv.go:367:2 m
+cmd/cover/cover_test.go:62:2 err
+cmd/pprof/internal/profile/profile.go:131:10 err
+math/big/ftoa.go:285:2 m
+net/file_unix.go:66:7 err
+golang.org/x/mobile/app/android.go:175:2 queue
+golang.org/x/net/icmp/listen_posix.go:83:6 err
diff --git a/vendor/github.com/gordonklaus/ineffassign/ineffassign.go b/vendor/github.com/gordonklaus/ineffassign/ineffassign.go
new file mode 100644
index 0000000..d067bc4
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/ineffassign.go
@@ -0,0 +1,623 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+const invalidArgumentExitCode = 3
+
+var dontRecurseFlag = flag.Bool("n", false, "don't recursively check paths")
+
+func main() {
+ flag.Parse()
+
+ if len(flag.Args()) == 0 {
+ fmt.Println("missing argument: filepath")
+ os.Exit(invalidArgumentExitCode)
+ }
+
+ lintFailed := false
+ for _, path := range flag.Args() {
+ root, err := filepath.Abs(path)
+ if err != nil {
+ fmt.Printf("Error finding absolute path: %s", err)
+ os.Exit(invalidArgumentExitCode)
+ }
+ if walkPath(root) {
+ lintFailed = true
+ }
+ }
+ if lintFailed {
+ os.Exit(1)
+ }
+}
+
+func walkPath(root string) bool {
+ lintFailed := false
+ filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ fmt.Printf("Error during filesystem walk: %v\n", err)
+ return nil
+ }
+ if fi.IsDir() {
+ if path != root && (*dontRecurseFlag ||
+ filepath.Base(path) == "testdata" ||
+ filepath.Base(path) == "vendor") {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if !strings.HasSuffix(path, ".go") {
+ return nil
+ }
+ fset, _, ineff := checkPath(path)
+ for _, id := range ineff {
+ fmt.Printf("%s: ineffectual assignment to %s\n", fset.Position(id.Pos()), id.Name)
+ lintFailed = true
+ }
+ return nil
+ })
+ return lintFailed
+}
+
+func checkPath(path string) (*token.FileSet, []*ast.CommentGroup, []*ast.Ident) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
+ if err != nil {
+ return nil, nil, nil
+ }
+
+ bld := &builder{vars: map[*ast.Object]*variable{}}
+ bld.walk(f)
+
+ chk := &checker{vars: bld.vars, seen: map[*block]bool{}}
+ for _, b := range bld.roots {
+ chk.check(b)
+ }
+ sort.Sort(chk.ineff)
+
+ return fset, f.Comments, chk.ineff
+}
+
+type builder struct {
+ roots []*block
+ block *block
+ vars map[*ast.Object]*variable
+ results []*ast.FieldList
+ breaks branchStack
+ continues branchStack
+ gotos branchStack
+ labelStmt *ast.LabeledStmt
+}
+
+type block struct {
+ children []*block
+ ops map[*ast.Object][]operation
+}
+
+func (b *block) addChild(c *block) {
+ b.children = append(b.children, c)
+}
+
+type operation struct {
+ id *ast.Ident
+ assign bool
+}
+
+type variable struct {
+ fundept int
+ escapes bool
+}
+
+func (bld *builder) walk(n ast.Node) {
+ if n != nil {
+ ast.Walk(bld, n)
+ }
+}
+
+func (bld *builder) Visit(n ast.Node) ast.Visitor {
+ switch n := n.(type) {
+ case *ast.FuncDecl:
+ if n.Body != nil {
+ bld.fun(n.Type, n.Body)
+ }
+ case *ast.FuncLit:
+ bld.fun(n.Type, n.Body)
+ case *ast.IfStmt:
+ bld.walk(n.Init)
+ bld.walk(n.Cond)
+ b0 := bld.block
+ bld.newBlock(b0)
+ bld.walk(n.Body)
+ b1 := bld.block
+ if n.Else != nil {
+ bld.newBlock(b0)
+ bld.walk(n.Else)
+ b0 = bld.block
+ }
+ bld.newBlock(b0, b1)
+ case *ast.ForStmt:
+ lbl := bld.stmtLabel(n)
+ brek := bld.breaks.push(lbl)
+ continu := bld.continues.push(lbl)
+ bld.walk(n.Init)
+ start := bld.newBlock(bld.block)
+ bld.walk(n.Cond)
+ cond := bld.block
+ bld.newBlock(cond)
+ bld.walk(n.Body)
+ continu.setDestination(bld.newBlock(bld.block))
+ bld.walk(n.Post)
+ bld.block.addChild(start)
+ brek.setDestination(bld.newBlock(cond))
+ bld.breaks.pop()
+ bld.continues.pop()
+ case *ast.RangeStmt:
+ lbl := bld.stmtLabel(n)
+ brek := bld.breaks.push(lbl)
+ continu := bld.continues.push(lbl)
+ bld.walk(n.X)
+ pre := bld.newBlock(bld.block)
+ start := bld.newBlock(pre)
+ if n.Key != nil {
+ lhs := []ast.Expr{n.Key}
+ if n.Value != nil {
+ lhs = append(lhs, n.Value)
+ }
+ bld.walk(&ast.AssignStmt{Lhs: lhs, Tok: n.Tok, TokPos: n.TokPos, Rhs: []ast.Expr{&ast.Ident{NamePos: n.X.End()}}})
+ }
+ bld.walk(n.Body)
+ bld.block.addChild(start)
+ continu.setDestination(pre)
+ brek.setDestination(bld.newBlock(pre, bld.block))
+ bld.breaks.pop()
+ bld.continues.pop()
+ case *ast.SwitchStmt:
+ bld.walk(n.Init)
+ bld.walk(n.Tag)
+ bld.swtch(n, n.Body.List)
+ case *ast.TypeSwitchStmt:
+ bld.walk(n.Init)
+ bld.walk(n.Assign)
+ bld.swtch(n, n.Body.List)
+ case *ast.SelectStmt:
+ brek := bld.breaks.push(bld.stmtLabel(n))
+ for _, c := range n.Body.List {
+ c := c.(*ast.CommClause).Comm
+ if s, ok := c.(*ast.AssignStmt); ok {
+ bld.walk(s.Rhs[0])
+ } else {
+ bld.walk(c)
+ }
+ }
+ b0 := bld.block
+ exits := make([]*block, len(n.Body.List))
+ dfault := false
+ for i, c := range n.Body.List {
+ c := c.(*ast.CommClause)
+ bld.newBlock(b0)
+ bld.walk(c)
+ exits[i] = bld.block
+ dfault = dfault || c.Comm == nil
+ }
+ if !dfault {
+ exits = append(exits, b0)
+ }
+ brek.setDestination(bld.newBlock(exits...))
+ bld.breaks.pop()
+ case *ast.LabeledStmt:
+ bld.gotos.get(n.Label).setDestination(bld.newBlock(bld.block))
+ bld.labelStmt = n
+ bld.walk(n.Stmt)
+ case *ast.BranchStmt:
+ switch n.Tok {
+ case token.BREAK:
+ bld.breaks.get(n.Label).addSource(bld.block)
+ bld.newBlock()
+ case token.CONTINUE:
+ bld.continues.get(n.Label).addSource(bld.block)
+ bld.newBlock()
+ case token.GOTO:
+ bld.gotos.get(n.Label).addSource(bld.block)
+ bld.newBlock()
+ }
+
+ case *ast.AssignStmt:
+ if n.Tok == token.QUO_ASSIGN || n.Tok == token.REM_ASSIGN {
+ bld.maybePanic()
+ }
+
+ for _, x := range n.Rhs {
+ bld.walk(x)
+ }
+ for i, x := range n.Lhs {
+ if id, ok := ident(x); ok {
+ if n.Tok >= token.ADD_ASSIGN && n.Tok <= token.AND_NOT_ASSIGN {
+ bld.use(id)
+ }
+ // Don't treat explicit initialization to zero as assignment; it is often used as shorthand for a bare declaration.
+ if n.Tok == token.DEFINE && i < len(n.Rhs) && isZeroInitializer(n.Rhs[i]) {
+ bld.use(id)
+ } else {
+ bld.assign(id)
+ }
+ } else {
+ bld.walk(x)
+ }
+ }
+ case *ast.GenDecl:
+ if n.Tok == token.VAR {
+ for _, s := range n.Specs {
+ s := s.(*ast.ValueSpec)
+ for _, x := range s.Values {
+ bld.walk(x)
+ }
+ for _, id := range s.Names {
+ if len(s.Values) > 0 {
+ bld.assign(id)
+ } else {
+ bld.use(id)
+ }
+ }
+ }
+ }
+ case *ast.IncDecStmt:
+ if id, ok := ident(n.X); ok {
+ bld.use(id)
+ bld.assign(id)
+ } else {
+ bld.walk(n.X)
+ }
+ case *ast.Ident:
+ bld.use(n)
+ case *ast.ReturnStmt:
+ for _, x := range n.Results {
+ bld.walk(x)
+ }
+ res := bld.results[len(bld.results)-1]
+ if res == nil {
+ break
+ }
+ for _, f := range res.List {
+ for _, id := range f.Names {
+ if n.Results != nil {
+ bld.assign(id)
+ }
+ bld.use(id)
+ }
+ }
+ case *ast.SendStmt:
+ bld.maybePanic()
+ return bld
+
+ case *ast.BinaryExpr:
+ if n.Op == token.EQL || n.Op == token.QUO || n.Op == token.REM {
+ bld.maybePanic()
+ }
+ return bld
+ case *ast.CallExpr:
+ bld.maybePanic()
+ return bld
+ case *ast.IndexExpr:
+ bld.maybePanic()
+ return bld
+ case *ast.UnaryExpr:
+ id, ok := ident(n.X)
+ if ix, isIx := n.X.(*ast.IndexExpr); isIx {
+ // We don't care about indexing into slices, but without type information we can do no better.
+ id, ok = ident(ix.X)
+ }
+ if ok && n.Op == token.AND {
+ if v, ok := bld.vars[id.Obj]; ok {
+ v.escapes = true
+ }
+ }
+ return bld
+ case *ast.SelectorExpr:
+ bld.maybePanic()
+ // A method call (possibly delayed via a method value) might implicitly take
+ // the address of its receiver, causing it to escape.
+ // We can't do any better here without knowing the variable's type.
+ if id, ok := ident(n.X); ok {
+ if v, ok := bld.vars[id.Obj]; ok {
+ v.escapes = true
+ }
+ }
+ return bld
+ case *ast.SliceExpr:
+ bld.maybePanic()
+ // We don't care about slicing into slices, but without type information we can do no better.
+ if id, ok := ident(n.X); ok {
+ if v, ok := bld.vars[id.Obj]; ok {
+ v.escapes = true
+ }
+ }
+ return bld
+ case *ast.StarExpr:
+ bld.maybePanic()
+ return bld
+ case *ast.TypeAssertExpr:
+ bld.maybePanic()
+ return bld
+
+ default:
+ return bld
+ }
+ return nil
+}
+
+func isZeroInitializer(x ast.Expr) bool {
+ // Assume that a call expression of a single argument is a conversion expression. We can't do better without type information.
+ if c, ok := x.(*ast.CallExpr); ok {
+ switch c.Fun.(type) {
+ case *ast.Ident, *ast.SelectorExpr:
+ default:
+ return false
+ }
+ if len(c.Args) != 1 {
+ return false
+ }
+ x = c.Args[0]
+ }
+
+ b, ok := x.(*ast.BasicLit)
+ if !ok {
+ return false
+ }
+ switch b.Value {
+ case "0", "0.0", "0.", ".0", `""`:
+ return true
+ }
+ return false
+}
+
+func (bld *builder) fun(typ *ast.FuncType, body *ast.BlockStmt) {
+ for _, v := range bld.vars {
+ v.fundept++
+ }
+ bld.results = append(bld.results, typ.Results)
+
+ b := bld.block
+ bld.newBlock()
+ bld.roots = append(bld.roots, bld.block)
+ bld.walk(typ)
+ bld.walk(body)
+ bld.block = b
+
+ bld.results = bld.results[:len(bld.results)-1]
+ for _, v := range bld.vars {
+ v.fundept--
+ }
+}
+
+func (bld *builder) swtch(stmt ast.Stmt, cases []ast.Stmt) {
+ brek := bld.breaks.push(bld.stmtLabel(stmt))
+ b0 := bld.block
+ list := b0
+ exits := make([]*block, 0, len(cases)+1)
+ var dfault, fallthru *block
+ for _, c := range cases {
+ c := c.(*ast.CaseClause)
+
+ if c.List != nil {
+ list = bld.newBlock(list)
+ for _, x := range c.List {
+ bld.walk(x)
+ }
+ }
+
+ parents := []*block{}
+ if c.List != nil {
+ parents = append(parents, list)
+ }
+ if fallthru != nil {
+ parents = append(parents, fallthru)
+ fallthru = nil
+ }
+ bld.newBlock(parents...)
+ if c.List == nil {
+ dfault = bld.block
+ }
+ for _, s := range c.Body {
+ bld.walk(s)
+ if s, ok := s.(*ast.BranchStmt); ok && s.Tok == token.FALLTHROUGH {
+ fallthru = bld.block
+ }
+ }
+
+ if fallthru == nil {
+ exits = append(exits, bld.block)
+ }
+ }
+ if dfault != nil {
+ list.addChild(dfault)
+ } else {
+ exits = append(exits, b0)
+ }
+ brek.setDestination(bld.newBlock(exits...))
+ bld.breaks.pop()
+}
+
+// An operation that might panic marks named function results as used.
+func (bld *builder) maybePanic() {
+ if len(bld.results) == 0 {
+ return
+ }
+ res := bld.results[len(bld.results)-1]
+ if res == nil {
+ return
+ }
+ for _, f := range res.List {
+ for _, id := range f.Names {
+ bld.use(id)
+ }
+ }
+}
+
+func (bld *builder) newBlock(parents ...*block) *block {
+ bld.block = &block{ops: map[*ast.Object][]operation{}}
+ for _, b := range parents {
+ b.addChild(bld.block)
+ }
+ return bld.block
+}
+
+func (bld *builder) stmtLabel(s ast.Stmt) *ast.Object {
+ if ls := bld.labelStmt; ls != nil && ls.Stmt == s {
+ return ls.Label.Obj
+ }
+ return nil
+}
+
+func (bld *builder) assign(id *ast.Ident) {
+ bld.newOp(id, true)
+}
+
+func (bld *builder) use(id *ast.Ident) {
+ bld.newOp(id, false)
+}
+
+func (bld *builder) newOp(id *ast.Ident, assign bool) {
+ if id.Name == "_" || id.Obj == nil {
+ return
+ }
+
+ v, ok := bld.vars[id.Obj]
+ if !ok {
+ v = &variable{}
+ bld.vars[id.Obj] = v
+ }
+ v.escapes = v.escapes || v.fundept > 0 || bld.block == nil
+
+ if b := bld.block; b != nil {
+ b.ops[id.Obj] = append(b.ops[id.Obj], operation{id, assign})
+ }
+}
+
+type branchStack []*branch
+
+type branch struct {
+ label *ast.Object
+ srcs []*block
+ dst *block
+}
+
+func (s *branchStack) push(lbl *ast.Object) *branch {
+ br := &branch{label: lbl}
+ *s = append(*s, br)
+ return br
+}
+
+func (s *branchStack) get(lbl *ast.Ident) *branch {
+ for i := len(*s) - 1; i >= 0; i-- {
+ if br := (*s)[i]; lbl == nil || br.label == lbl.Obj {
+ return br
+ }
+ }
+
+ // Guard against invalid code (break/continue outside of loop).
+ if lbl == nil {
+ return &branch{}
+ }
+
+ return s.push(lbl.Obj)
+}
+
+func (br *branch) addSource(src *block) {
+ br.srcs = append(br.srcs, src)
+ if br.dst != nil {
+ src.addChild(br.dst)
+ }
+}
+
+func (br *branch) setDestination(dst *block) {
+ br.dst = dst
+ for _, src := range br.srcs {
+ src.addChild(dst)
+ }
+}
+
+func (s *branchStack) pop() {
+ *s = (*s)[:len(*s)-1]
+}
+
+func ident(x ast.Expr) (*ast.Ident, bool) {
+ if p, ok := x.(*ast.ParenExpr); ok {
+ return ident(p.X)
+ }
+ id, ok := x.(*ast.Ident)
+ return id, ok
+}
+
+type checker struct {
+ vars map[*ast.Object]*variable
+ seen map[*block]bool
+ ineff idents
+}
+
+func (chk *checker) check(b *block) {
+ if chk.seen[b] {
+ return
+ }
+ chk.seen[b] = true
+
+ for obj, ops := range b.ops {
+ if chk.vars[obj].escapes {
+ continue
+ }
+ ops:
+ for i, op := range ops {
+ if !op.assign {
+ continue
+ }
+ if i+1 < len(ops) {
+ if ops[i+1].assign {
+ chk.ineff = append(chk.ineff, op.id)
+ }
+ continue
+ }
+ seen := map[*block]bool{}
+ for _, b := range b.children {
+ if used(obj, b, seen) {
+ continue ops
+ }
+ }
+ chk.ineff = append(chk.ineff, op.id)
+ }
+ }
+
+ for _, b := range b.children {
+ chk.check(b)
+ }
+}
+
+func used(obj *ast.Object, b *block, seen map[*block]bool) bool {
+ if seen[b] {
+ return false
+ }
+ seen[b] = true
+
+ if ops := b.ops[obj]; len(ops) > 0 {
+ return !ops[0].assign
+ }
+ for _, b := range b.children {
+ if used(obj, b, seen) {
+ return true
+ }
+ }
+ return false
+}
+
+type idents []*ast.Ident
+
+func (ids idents) Len() int { return len(ids) }
+func (ids idents) Less(i, j int) bool { return ids[i].Pos() < ids[j].Pos() }
+func (ids idents) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] }
diff --git a/vendor/github.com/gordonklaus/ineffassign/ineffassign_test.go b/vendor/github.com/gordonklaus/ineffassign/ineffassign_test.go
new file mode 100644
index 0000000..a6f4f80
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/ineffassign_test.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+ "strings"
+ "testing"
+)
+
+func Test(t *testing.T) {
+ fset, comments, ineff := checkPath("testdata/testdata.go")
+ expected := map[int]string{}
+ for _, c := range comments {
+ expected[fset.Position(c.Pos()).Line] = strings.TrimSpace(c.Text())
+ }
+
+ for _, id := range ineff {
+ line := fset.Position(id.Pos()).Line
+ if name, ok := expected[line]; !ok || name != id.Name {
+ t.Error("unexpected:", line, id.Name)
+ }
+ delete(expected, line)
+ }
+ for line, name := range expected {
+ t.Error("expected:", line, name)
+ }
+}
diff --git a/vendor/github.com/gordonklaus/ineffassign/list b/vendor/github.com/gordonklaus/ineffassign/list
new file mode 100644
index 0000000..7e6b1e7
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/list
@@ -0,0 +1,25 @@
+/Users/gordon/go/src/code.google.com/p/freetype-go/freetype/truetype/truetype.go:493:5: offset assigned and not used
+/Users/gordon/go/src/code.google.com/p/freetype-go/freetype/truetype/truetype.go:289:11: offset assigned and not used
+/Users/gordon/go/src/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go:224:2: prefix assigned and not used
+/Users/gordon/go/src/code.google.com/p/freetype-go/freetype/truetype/truetype_test.go:239:3: s assigned and not used
+/Users/gordon/go/src/github.com/gordonklaus/flux/go/types/resolver.go:372:2: seenPkgs assigned and not used
+/Users/gordon/go/src/github.com/gopherjs/gopherjs/compiler/package.go:195:7: recvType assigned and not used
+/Users/gordon/go/src/golang.org/x/crypto/ocsp/ocsp.go:340:2: rest assigned and not used
+/Users/gordon/go/src/golang.org/x/crypto/openpgp/packet/opaque_test.go:35:6: err assigned and not used
+/Users/gordon/go/src/golang.org/x/crypto/otr/otr.go:641:6: in assigned and not used
+/Users/gordon/go/src/golang.org/x/crypto/otr/otr_test.go:198:17: err assigned and not used
+/Users/gordon/go/src/golang.org/x/crypto/ssh/benchmark_test.go:94:17: err assigned and not used
+/Users/gordon/go/src/golang.org/x/mobile/app/android.go:175:2: queue assigned and not used
+/Users/gordon/go/src/golang.org/x/mobile/cmd/gomobile/bind.go:411:2: w assigned and not used
+/Users/gordon/go/src/golang.org/x/mobile/cmd/gomobile/build.go:231:8: err assigned and not used
+/Users/gordon/go/src/golang.org/x/net/icmp/listen_posix.go:83:6: err assigned and not used
+/Users/gordon/go/src/golang.org/x/net/ipv4/control_unix.go:99:5: b assigned and not used
+/Users/gordon/go/src/golang.org/x/net/ipv4/control_unix.go:148:4: b assigned and not used
+/Users/gordon/go/src/golang.org/x/net/ipv6/control_unix.go:90:4: b assigned and not used
+/Users/gordon/go/src/golang.org/x/net/ipv6/control_unix.go:162:4: b assigned and not used
+/Users/gordon/go/src/golang.org/x/net/websocket/hybi.go:298:3: n assigned and not used
+/Users/gordon/go/src/golang.org/x/tools/cmd/callgraph/main.go:164:2: args assigned and not used
+/Users/gordon/go/src/golang.org/x/tools/cmd/cover/cover_test.go:52:2: err assigned and not used
+/Users/gordon/go/src/golang.org/x/tools/go/gcimporter/exportdata.go:74:13: size assigned and not used
+/Users/gordon/go/src/golang.org/x/tools/oracle/oracle.go:268:2: iprog assigned and not used
+/Users/gordon/go/src/golang.org/x/tools/oracle/oracle_test.go:299:2: iprog assigned and not used
diff --git a/vendor/github.com/gordonklaus/ineffassign/liststd b/vendor/github.com/gordonklaus/ineffassign/liststd
new file mode 100644
index 0000000..591d026
--- /dev/null
+++ b/vendor/github.com/gordonklaus/ineffassign/liststd
@@ -0,0 +1,131 @@
+/usr/local/go/src/bufio/scan.go:388:6: ineffectual assignment to width
+/usr/local/go/src/bufio/scan.go:396:6: ineffectual assignment to width
+/usr/local/go/src/bytes/buffer_test.go:141:6: ineffectual assignment to err
+/usr/local/go/src/bytes/buffer_test.go:164:3: ineffectual assignment to c
+/usr/local/go/src/cmd/cgo/out.go:799:3: ineffectual assignment to gccResult
+/usr/local/go/src/cmd/compile/internal/big/ratconv.go:170:4: ineffectual assignment to err
+/usr/local/go/src/cmd/compile/internal/gc/bimport.go:330:2: ineffectual assignment to file
+/usr/local/go/src/cmd/compile/internal/gc/cgen.go:3332:3: ineffectual assignment to max
+/usr/local/go/src/cmd/compile/internal/gc/export.go:379:2: ineffectual assignment to size
+/usr/local/go/src/cmd/compile/internal/gc/global_test.go:51:2: ineffectual assignment to out
+/usr/local/go/src/cmd/compile/internal/gc/lex.go:281:4: ineffectual assignment to c1
+/usr/local/go/src/cmd/compile/internal/gc/reg.go:1373:2: ineffectual assignment to firstf
+/usr/local/go/src/cmd/compile/internal/gc/reg.go:1381:3: ineffectual assignment to firstf
+/usr/local/go/src/cmd/compile/internal/s390x/peep.go:1048:3: ineffectual assignment to size
+/usr/local/go/src/cmd/compile/internal/s390x/peep.go:1139:3: ineffectual assignment to size
+/usr/local/go/src/cmd/compile/internal/ssa/loopbce.go:44:3: ineffectual assignment to entry
+/usr/local/go/src/cmd/cover/html.go:64:8: ineffectual assignment to err
+/usr/local/go/src/cmd/cover/html.go:66:8: ineffectual assignment to err
+/usr/local/go/src/cmd/go/build.go:3355:3: ineffectual assignment to cgoLDFLAGS
+/usr/local/go/src/cmd/internal/goobj/read.go:532:3: ineffectual assignment to data
+/usr/local/go/src/cmd/internal/obj/arm64/obj7.go:600:2: ineffectual assignment to aoffset
+/usr/local/go/src/cmd/internal/obj/mips/asm0.go:1049:3: ineffectual assignment to v
+/usr/local/go/src/cmd/internal/obj/mips/asm0.go:1101:3: ineffectual assignment to v
+/usr/local/go/src/cmd/internal/obj/s390x/objz.go:609:3: ineffectual assignment to pLast
+/usr/local/go/src/cmd/internal/pprof/profile/encode.go:279:12: ineffectual assignment to err
+/usr/local/go/src/cmd/link/internal/ld/dwarf.go:1426:2: ineffectual assignment to unitstart
+/usr/local/go/src/cmd/link/internal/ld/dwarf.go:1427:2: ineffectual assignment to headerstart
+/usr/local/go/src/cmd/link/internal/ld/dwarf.go:1428:2: ineffectual assignment to headerend
+/usr/local/go/src/cmd/link/internal/ld/elf.go:2272:3: ineffectual assignment to resoff
+/usr/local/go/src/cmd/vet/print.go:227:9: ineffectual assignment to w
+/usr/local/go/src/cmd/yacc/yacc.go:770:2: ineffectual assignment to val
+/usr/local/go/src/cmd/yacc/yacc.go:3127:2: ineffectual assignment to i
+/usr/local/go/src/compress/bzip2/huffman.go:114:4: ineffectual assignment to length
+/usr/local/go/src/compress/flate/reader_test.go:53:3: ineffectual assignment to buf0
+/usr/local/go/src/compress/flate/writer_test.go:29:3: ineffectual assignment to buf0
+/usr/local/go/src/compress/gzip/gzip_test.go:211:5: ineffectual assignment to err
+/usr/local/go/src/compress/lzw/reader_test.go:148:4: ineffectual assignment to buf0
+/usr/local/go/src/compress/lzw/writer_test.go:146:3: ineffectual assignment to buf0
+/usr/local/go/src/container/list/list_test.go:286:2: ineffectual assignment to e1
+/usr/local/go/src/container/list/list_test.go:286:6: ineffectual assignment to e2
+/usr/local/go/src/container/list/list_test.go:286:10: ineffectual assignment to e3
+/usr/local/go/src/container/list/list_test.go:286:14: ineffectual assignment to e4
+/usr/local/go/src/crypto/elliptic/p224.go:722:10: ineffectual assignment to bytes
+/usr/local/go/src/crypto/tls/handshake_messages.go:289:3: ineffectual assignment to z
+/usr/local/go/src/crypto/x509/verify.go:110:5: ineffectual assignment to certName
+/usr/local/go/src/database/sql/sql_test.go:1705:4: ineffectual assignment to numOpen
+/usr/local/go/src/database/sql/sql_test.go:1839:5: ineffectual assignment to err
+/usr/local/go/src/debug/dwarf/type.go:540:5: ineffectual assignment to haveBitOffset
+/usr/local/go/src/debug/elf/file.go:1014:3: ineffectual assignment to suffix
+/usr/local/go/src/debug/gosym/pclntab_test.go:256:2: ineffectual assignment to off
+/usr/local/go/src/debug/pe/file_test.go:309:2: ineffectual assignment to err
+/usr/local/go/src/encoding/base32/base32_test.go:120:4: ineffectual assignment to count
+/usr/local/go/src/encoding/base64/base64_test.go:174:4: ineffectual assignment to count
+/usr/local/go/src/encoding/gob/decgen.go:187:6: ineffectual assignment to err
+/usr/local/go/src/encoding/gob/encgen.go:166:6: ineffectual assignment to err
+/usr/local/go/src/encoding/json/encode.go:1071:2: ineffectual assignment to count
+/usr/local/go/src/encoding/json/encode.go:1169:6: ineffectual assignment to advance
+/usr/local/go/src/encoding/xml/xml.go:1030:6: ineffectual assignment to ok
+/usr/local/go/src/fmt/print.go:936:2: ineffectual assignment to afterIndex
+/usr/local/go/src/fmt/print.go:1051:15: ineffectual assignment to afterIndex
+/usr/local/go/src/go/ast/filter.go:84:3: ineffectual assignment to keepField
+/usr/local/go/src/go/internal/gcimporter/bimport.go:215:2: ineffectual assignment to file
+/usr/local/go/src/go/printer/nodes.go:439:4: ineffectual assignment to extraTabs
+/usr/local/go/src/go/printer/printer_test.go:155:8: ineffectual assignment to err
+/usr/local/go/src/go/types/conversions.go:49:2: ineffectual assignment to final
+/usr/local/go/src/html/template/css.go:160:2: ineffectual assignment to r
+/usr/local/go/src/html/template/css.go:160:5: ineffectual assignment to w
+/usr/local/go/src/html/template/html.go:141:2: ineffectual assignment to r
+/usr/local/go/src/html/template/html.go:141:5: ineffectual assignment to w
+/usr/local/go/src/html/template/js.go:249:2: ineffectual assignment to r
+/usr/local/go/src/html/template/js.go:249:5: ineffectual assignment to w
+/usr/local/go/src/image/decode_test.go:125:9: ineffectual assignment to err
+/usr/local/go/src/image/png/reader.go:689:2: ineffectual assignment to n
+/usr/local/go/src/image/png/writer.go:269:3: ineffectual assignment to best
+/usr/local/go/src/io/io_test.go:245:2: ineffectual assignment to n
+/usr/local/go/src/io/ioutil/ioutil.go:149:2: ineffectual assignment to readSize
+/usr/local/go/src/io/ioutil/ioutil_test.go:24:2: ineffectual assignment to contents
+/usr/local/go/src/log/syslog/syslog_test.go:236:5: ineffectual assignment to err
+/usr/local/go/src/log/syslog/syslog_test.go:240:5: ineffectual assignment to err
+/usr/local/go/src/math/big/ratconv.go:176:4: ineffectual assignment to err
+/usr/local/go/src/mime/multipart/multipart_test.go:408:2: ineffectual assignment to p
+/usr/local/go/src/net/dial_test.go:381:6: ineffectual assignment to err
+/usr/local/go/src/net/dnsname_test.go:36:6: ineffectual assignment to char63
+/usr/local/go/src/net/dnsname_test.go:37:6: ineffectual assignment to char64
+/usr/local/go/src/net/fd_plan9.go:64:4: ineffectual assignment to err
+/usr/local/go/src/net/fd_windows.go:166:3: ineffectual assignment to err
+/usr/local/go/src/net/http/fs.go:413:5: ineffectual assignment to name
+/usr/local/go/src/net/http/h2_bundle.go:6249:4: ineffectual assignment to n
+/usr/local/go/src/net/http/request_test.go:155:13: ineffectual assignment to err
+/usr/local/go/src/net/http/serve_test.go:4053:13: ineffectual assignment to err
+/usr/local/go/src/net/http/transport_test.go:729:8: ineffectual assignment to err
+/usr/local/go/src/net/http/transport_test.go:2345:3: ineffectual assignment to slurp
+/usr/local/go/src/net/parse.go:27:2: ineffectual assignment to i
+/usr/local/go/src/net/rpc/server.go:270:3: ineffectual assignment to str
+/usr/local/go/src/net/udpsock_plan9.go:80:16: ineffectual assignment to i
+/usr/local/go/src/os/env_test.go:109:2: ineffectual assignment to value
+/usr/local/go/src/os/os_test.go:1080:5: ineffectual assignment to err
+/usr/local/go/src/os/path_test.go:122:2: ineffectual assignment to testit
+/usr/local/go/src/reflect/type.go:2379:3: ineffectual assignment to name
+/usr/local/go/src/regexp/exec.go:123:2: ineffectual assignment to r
+/usr/local/go/src/regexp/exec.go:124:2: ineffectual assignment to width
+/usr/local/go/src/regexp/exec.go:321:2: ineffectual assignment to r
+/usr/local/go/src/regexp/exec.go:322:2: ineffectual assignment to width
+/usr/local/go/src/regexp/onepass.go:338:15: ineffectual assignment to matchArg
+/usr/local/go/src/regexp/syntax/parse.go:577:2: ineffectual assignment to start
+/usr/local/go/src/runtime/lfstack_test.go:48:2: ineffectual assignment to nodes
+/usr/local/go/src/runtime/mbitmap.go:1458:3: ineffectual assignment to i
+/usr/local/go/src/runtime/mfinal_test.go:60:4: ineffectual assignment to v
+/usr/local/go/src/runtime/mfinal_test.go:98:3: ineffectual assignment to v
+/usr/local/go/src/runtime/mgcmark.go:414:2: ineffectual assignment to stolen
+/usr/local/go/src/runtime/mgcsweep.go:188:2: ineffectual assignment to nfree
+/usr/local/go/src/runtime/os_plan9.go:307:2: ineffectual assignment to n
+/usr/local/go/src/runtime/pprof/pprof.go:465:5: ineffectual assignment to ok
+/usr/local/go/src/runtime/pprof/pprof.go:608:5: ineffectual assignment to ok
+/usr/local/go/src/runtime/pprof/pprof.go:751:5: ineffectual assignment to ok
+/usr/local/go/src/runtime/proc.go:4227:3: ineffectual assignment to xname
+/usr/local/go/src/runtime/runtime1.go:360:3: ineffectual assignment to field
+/usr/local/go/src/runtime/runtime_mmap_test.go:25:2: ineffectual assignment to p
+/usr/local/go/src/runtime/softfloat64.go:228:3: ineffectual assignment to f
+/usr/local/go/src/runtime/softfloat64.go:228:6: ineffectual assignment to g
+/usr/local/go/src/runtime/stack_test.go:106:4: ineffectual assignment to s
+/usr/local/go/src/strconv/quote.go:23:6: ineffectual assignment to width
+/usr/local/go/src/sync/atomic/atomic_test.go:1122:2: ineffectual assignment to new
+/usr/local/go/src/sync/atomic/atomic_test.go:1150:2: ineffectual assignment to new
+/usr/local/go/src/syscall/dir_plan9.go:88:2: ineffectual assignment to b
+/usr/local/go/src/syscall/dir_plan9.go:131:13: ineffectual assignment to b
+/usr/local/go/src/syscall/exec_plan9.go:281:2: ineffectual assignment to r1
+/usr/local/go/src/syscall/mksyscall_windows.go:310:2: ineffectual assignment to s
+/usr/local/go/src/syscall/syscall_bsd_test.go:23:2: ineffectual assignment to n
+/usr/local/go/src/syscall/syscall_unix_test.go:187:17: ineffectual assignment to err
+/usr/local/go/src/text/template/multi_test.go:249:9: ineffectual assignment to err
diff --git a/vendor/github.com/juju/ansiterm/LICENSE b/vendor/github.com/juju/ansiterm/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/github.com/juju/ansiterm/Makefile b/vendor/github.com/juju/ansiterm/Makefile
new file mode 100644
index 0000000..212fdcb
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/Makefile
@@ -0,0 +1,14 @@
+# Copyright 2016 Canonical Ltd.
+# Licensed under the LGPLv3, see LICENCE file for details.
+
+default: check
+
+check:
+ go test
+
+docs:
+ godoc2md github.com/juju/ansiterm > README.md
+ sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/ansiterm?status.svg)](https://godoc.org/github.com/juju/ansiterm)|' README.md
+
+
+.PHONY: default check docs
diff --git a/vendor/github.com/juju/ansiterm/README.md b/vendor/github.com/juju/ansiterm/README.md
new file mode 100644
index 0000000..5674387
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/README.md
@@ -0,0 +1,323 @@
+
+# ansiterm
+ import "github.com/juju/ansiterm"
+
+Package ansiterm provides a Writer that writes out the ANSI escape
+codes for color and styles.
+
+
+
+
+
+
+
+## type Color
+``` go
+type Color int
+```
+Color represents one of the standard 16 ANSI colors.
+
+
+
+``` go
+const (
+ Default Color
+ Black
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ Gray
+ DarkGray
+ BrightRed
+ BrightGreen
+ BrightYellow
+ BrightBlue
+ BrightMagenta
+ BrightCyan
+ White
+)
+```
+
+
+
+
+
+
+
+
+### func (Color) String
+``` go
+func (c Color) String() string
+```
+String returns the name of the color.
+
+
+
+## type Context
+``` go
+type Context struct {
+ Foreground Color
+ Background Color
+ Styles []Style
+}
+```
+Context provides a way to specify both foreground and background colors
+along with other styles and write text to a Writer with those colors and
+styles.
+
+
+
+
+
+
+
+
+
+### func Background
+``` go
+func Background(color Color) *Context
+```
+Background is a convenience function that creates a Context with the
+specified color as the background color.
+
+
+### func Foreground
+``` go
+func Foreground(color Color) *Context
+```
+Foreground is a convenience function that creates a Context with the
+specified color as the foreground color.
+
+
+### func Styles
+``` go
+func Styles(styles ...Style) *Context
+```
+Styles is a convenience function that creates a Context with the
+specified styles set.
+
+
+
+
+### func (\*Context) Fprint
+``` go
+func (c *Context) Fprint(w sgrWriter, args ...interface{})
+```
+Fprint will set the sgr values of the writer to the specified foreground,
+background and styles, then formats using the default formats for its
+operands and writes to w. Spaces are added between operands when neither is
+a string. It returns the number of bytes written and any write error
+encountered.
+
+
+
+### func (\*Context) Fprintf
+``` go
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{})
+```
+Fprintf will set the sgr values of the writer to the specified
+foreground, background and styles, then write the formatted string,
+then reset the writer.
+
+
+
+### func (\*Context) SetBackground
+``` go
+func (c *Context) SetBackground(color Color) *Context
+```
+SetBackground sets the background to the specified color.
+
+
+
+### func (\*Context) SetForeground
+``` go
+func (c *Context) SetForeground(color Color) *Context
+```
+SetForeground sets the foreground to the specified color.
+
+
+
+### func (\*Context) SetStyle
+``` go
+func (c *Context) SetStyle(styles ...Style) *Context
+```
+SetStyle replaces the styles with the new values.
+
+
+
+## type Style
+``` go
+type Style int
+```
+
+
+``` go
+const (
+ Bold Style
+ Faint
+ Italic
+ Underline
+ Blink
+ Reverse
+ Strikethrough
+ Conceal
+)
+```
+
+
+
+
+
+
+
+
+### func (Style) String
+``` go
+func (s Style) String() string
+```
+
+
+## type TabWriter
+``` go
+type TabWriter struct {
+ Writer
+ // contains filtered or unexported fields
+}
+```
+TabWriter is a filter that inserts padding around tab-delimited
+columns in its input to align them in the output.
+
+It also allows setting of colors and styles over and above the standard
+tabwriter package.
+
+
+
+
+
+
+
+
+
+### func NewTabWriter
+``` go
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+NewTabWriter returns a writer that is able to set colors and styles.
+The ansi escape codes are stripped for width calculations.
+
+
+
+
+### func (\*TabWriter) Flush
+``` go
+func (t *TabWriter) Flush() error
+```
+Flush should be called after the last call to Write to ensure
+that any data buffered in the Writer is written to output. Any
+incomplete escape sequence at the end is considered
+complete for formatting purposes.
+
+
+
+### func (\*TabWriter) Init
+``` go
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+A Writer must be initialized with a call to Init. The first parameter (output)
+specifies the filter output. The remaining parameters control the formatting:
+
+
+ minwidth minimal cell width including any padding
+ tabwidth width of tab characters (equivalent number of spaces)
+ padding padding added to a cell before computing its width
+ padchar ASCII char used for padding
+ if padchar == '\t', the Writer will assume that the
+ width of a '\t' in the formatted output is tabwidth,
+ and cells are left-aligned independent of align_left
+ (for correct-looking results, tabwidth must correspond
+ to the tab width in the viewer displaying the result)
+ flags formatting control
+
+
+
+## type Writer
+``` go
+type Writer struct {
+ io.Writer
+ // contains filtered or unexported fields
+}
+```
+Writer allows colors and styles to be specified. If the io.Writer
+is not a terminal capable of color, all attempts to set colors or
+styles are no-ops.
+
+
+
+
+
+
+
+
+
+### func NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a Writer that allows the caller to specify colors and
+styles. If the io.Writer is not a terminal capable of color, all attempts
+to set colors or styles are no-ops.
+
+
+
+
+### func (\*Writer) ClearStyle
+``` go
+func (w *Writer) ClearStyle(s Style)
+```
+ClearStyle clears the text style.
+
+
+
+### func (\*Writer) Reset
+``` go
+func (w *Writer) Reset()
+```
+Reset returns the default foreground and background colors with no styles.
+
+
+
+### func (\*Writer) SetBackground
+``` go
+func (w *Writer) SetBackground(c Color)
+```
+SetBackground sets the background color.
+
+
+
+### func (\*Writer) SetForeground
+``` go
+func (w *Writer) SetForeground(c Color)
+```
+SetForeground sets the foreground color.
+
+
+
+### func (\*Writer) SetStyle
+``` go
+func (w *Writer) SetStyle(s Style)
+```
+SetStyle sets the text style.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/vendor/github.com/juju/ansiterm/attribute.go b/vendor/github.com/juju/ansiterm/attribute.go
new file mode 100644
index 0000000..f2daa48
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/attribute.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+type attribute int
+
+const (
+ unknownAttribute attribute = -1
+ reset attribute = 0
+)
+
+// sgr returns the escape sequence for the Select Graphic Rendition
+// for the attribute.
+func (a attribute) sgr() string {
+ if a < 0 {
+ return ""
+ }
+ return fmt.Sprintf("\x1b[%dm", a)
+}
+
+type attributes []attribute
+
+func (a attributes) Len() int { return len(a) }
+func (a attributes) Less(i, j int) bool { return a[i] < a[j] }
+func (a attributes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// sgr returns the combined escape sequence for the Select Graphic Rendition
+// for the sequence of attributes.
+func (a attributes) sgr() string {
+ switch len(a) {
+ case 0:
+ return ""
+ case 1:
+ return a[0].sgr()
+ default:
+ sort.Sort(a)
+ var values []string
+ for _, attr := range a {
+ values = append(values, fmt.Sprint(attr))
+ }
+ return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";"))
+ }
+}
diff --git a/vendor/github.com/juju/ansiterm/attribute_test.go b/vendor/github.com/juju/ansiterm/attribute_test.go
new file mode 100644
index 0000000..eebfd12
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/attribute_test.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import gc "gopkg.in/check.v1"
+
+type attributeSuite struct{}
+
+var _ = gc.Suite(&attributeSuite{})
+
+func (*attributeSuite) TestSGR(c *gc.C) {
+ c.Check(unknownAttribute.sgr(), gc.Equals, "")
+ c.Check(reset.sgr(), gc.Equals, "\x1b[0m")
+ var yellow attribute = 33
+ c.Check(yellow.sgr(), gc.Equals, "\x1b[33m")
+}
+
+func (*attributeSuite) TestAttributes(c *gc.C) {
+ var a attributes
+ c.Check(a.sgr(), gc.Equals, "")
+ a = append(a, Yellow.foreground())
+ c.Check(a.sgr(), gc.Equals, "\x1b[33m")
+ a = append(a, Blue.background())
+ c.Check(a.sgr(), gc.Equals, "\x1b[33;44m")
+
+ // Add bold to the end to show sorting of the attributes.
+ a = append(a, Bold.enable())
+ c.Check(a.sgr(), gc.Equals, "\x1b[1;33;44m")
+}
diff --git a/vendor/github.com/juju/ansiterm/color.go b/vendor/github.com/juju/ansiterm/color.go
new file mode 100644
index 0000000..0a97de3
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/color.go
@@ -0,0 +1,119 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+ _ Color = iota
+ Default
+ Black
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ Gray
+ DarkGray
+ BrightRed
+ BrightGreen
+ BrightYellow
+ BrightBlue
+ BrightMagenta
+ BrightCyan
+ White
+)
+
+// Color represents one of the standard 16 ANSI colors.
+type Color int
+
+// String returns the name of the color.
+func (c Color) String() string {
+ switch c {
+ case Default:
+ return "default"
+ case Black:
+ return "black"
+ case Red:
+ return "red"
+ case Green:
+ return "green"
+ case Yellow:
+ return "yellow"
+ case Blue:
+ return "blue"
+ case Magenta:
+ return "magenta"
+ case Cyan:
+ return "cyan"
+ case Gray:
+ return "gray"
+ case DarkGray:
+ return "darkgray"
+ case BrightRed:
+ return "brightred"
+ case BrightGreen:
+ return "brightgreen"
+ case BrightYellow:
+ return "brightyellow"
+ case BrightBlue:
+ return "brightblue"
+ case BrightMagenta:
+ return "brightmagenta"
+ case BrightCyan:
+ return "brightcyan"
+ case White:
+ return "white"
+ default:
+ return ""
+ }
+}
+
+func (c Color) foreground() attribute {
+ switch c {
+ case Default:
+ return 39
+ case Black:
+ return 30
+ case Red:
+ return 31
+ case Green:
+ return 32
+ case Yellow:
+ return 33
+ case Blue:
+ return 34
+ case Magenta:
+ return 35
+ case Cyan:
+ return 36
+ case Gray:
+ return 37
+ case DarkGray:
+ return 90
+ case BrightRed:
+ return 91
+ case BrightGreen:
+ return 92
+ case BrightYellow:
+ return 93
+ case BrightBlue:
+ return 94
+ case BrightMagenta:
+ return 95
+ case BrightCyan:
+ return 96
+ case White:
+ return 97
+ default:
+ return unknownAttribute
+ }
+}
+
+func (c Color) background() attribute {
+ value := c.foreground()
+ if value != unknownAttribute {
+ return value + 10
+ }
+ return value
+}
diff --git a/vendor/github.com/juju/ansiterm/color_test.go b/vendor/github.com/juju/ansiterm/color_test.go
new file mode 100644
index 0000000..45ec3cb
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/color_test.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import gc "gopkg.in/check.v1"
+
+type colorSuite struct{}
+
+var _ = gc.Suite(&colorSuite{})
+
+func (*colorSuite) TestString(c *gc.C) {
+ c.Check(Default.String(), gc.Equals, "default")
+ c.Check(Yellow.String(), gc.Equals, "yellow")
+ c.Check(BrightMagenta.String(), gc.Equals, "brightmagenta")
+ var blank Color
+ c.Check(blank.String(), gc.Equals, "")
+ var huge Color = 1234
+ c.Check(huge.String(), gc.Equals, "")
+ var negative Color = -1
+ c.Check(negative.String(), gc.Equals, "")
+}
diff --git a/vendor/github.com/juju/ansiterm/context.go b/vendor/github.com/juju/ansiterm/context.go
new file mode 100644
index 0000000..e61a867
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/context.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "fmt"
+ "io"
+)
+
+// Context provides a way to specify both foreground and background colors
+// along with other styles and write text to a Writer with those colors and
+// styles.
+type Context struct {
+ Foreground Color
+ Background Color
+ Styles []Style
+}
+
+// Foreground is a convenience function that creates a Context with the
+// specified color as the foreground color.
+func Foreground(color Color) *Context {
+ return &Context{Foreground: color}
+}
+
+// Background is a convenience function that creates a Context with the
+// specified color as the background color.
+func Background(color Color) *Context {
+ return &Context{Background: color}
+}
+
+// Styles is a convenience function that creates a Context with the
+// specified styles set.
+func Styles(styles ...Style) *Context {
+ return &Context{Styles: styles}
+}
+
+// SetForeground sets the foreground to the specified color.
+func (c *Context) SetForeground(color Color) *Context {
+ c.Foreground = color
+ return c
+}
+
+// SetBackground sets the background to the specified color.
+func (c *Context) SetBackground(color Color) *Context {
+ c.Background = color
+ return c
+}
+
+// SetStyle replaces the styles with the new values.
+func (c *Context) SetStyle(styles ...Style) *Context {
+ c.Styles = styles
+ return c
+}
+
+type sgrWriter interface {
+ io.Writer
+ writeSGR(value sgr)
+}
+
+// Fprintf will set the sgr values of the writer to the specified
+// foreground, background and styles, then write the formatted string,
+// then reset the writer.
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) {
+ w.writeSGR(c)
+ fmt.Fprintf(w, format, args...)
+ w.writeSGR(reset)
+}
+
+// Fprint will set the sgr values of the writer to the specified foreground,
+// background and styles, then formats using the default formats for its
+// operands and writes to w. Spaces are added between operands when neither is
+// a string. It returns the number of bytes written and any write error
+// encountered.
+func (c *Context) Fprint(w sgrWriter, args ...interface{}) {
+ w.writeSGR(c)
+ fmt.Fprint(w, args...)
+ w.writeSGR(reset)
+}
+
+func (c *Context) sgr() string {
+ var values attributes
+ if foreground := c.Foreground.foreground(); foreground != unknownAttribute {
+ values = append(values, foreground)
+ }
+ if background := c.Background.background(); background != unknownAttribute {
+ values = append(values, background)
+ }
+ for _, style := range c.Styles {
+ if value := style.enable(); value != unknownAttribute {
+ values = append(values, value)
+ }
+ }
+ return values.sgr()
+}
diff --git a/vendor/github.com/juju/ansiterm/context_test.go b/vendor/github.com/juju/ansiterm/context_test.go
new file mode 100644
index 0000000..47f46a0
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/context_test.go
@@ -0,0 +1,104 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "bytes"
+
+ gc "gopkg.in/check.v1"
+)
+
+type contextSuite struct{}
+
+var _ = gc.Suite(&contextSuite{})
+
+func (*contextSuite) newWriter() (*bytes.Buffer, *Writer) {
+ buff := &bytes.Buffer{}
+ writer := NewWriter(buff)
+ writer.noColor = false
+ return buff, writer
+}
+
+func (*contextSuite) TestBlank(c *gc.C) {
+ var context Context
+ c.Assert(context.sgr(), gc.Equals, "")
+}
+
+func (*contextSuite) TestAllUnknown(c *gc.C) {
+ context := Context{
+ Foreground: 123,
+ Background: 432,
+ Styles: []Style{456, 99},
+ }
+ c.Assert(context.sgr(), gc.Equals, "")
+}
+
+func (*contextSuite) TestForeground(c *gc.C) {
+ context := Foreground(Yellow)
+ c.Assert(context.sgr(), gc.Equals, "\x1b[33m")
+}
+
+func (*contextSuite) TestBackground(c *gc.C) {
+ context := Background(Blue)
+ c.Assert(context.sgr(), gc.Equals, "\x1b[44m")
+}
+
+func (*contextSuite) TestStyles(c *gc.C) {
+ context := Styles(Bold, Italic)
+ c.Assert(context.sgr(), gc.Equals, "\x1b[1;3m")
+}
+
+func (*contextSuite) TestValid(c *gc.C) {
+ context := Context{
+ Foreground: Yellow,
+ Background: Blue,
+ Styles: []Style{Bold, Italic},
+ }
+ c.Assert(context.sgr(), gc.Equals, "\x1b[1;3;33;44m")
+}
+
+func (*contextSuite) TestSetForeground(c *gc.C) {
+ var context Context
+ context.SetForeground(Yellow)
+ c.Assert(context.sgr(), gc.Equals, "\x1b[33m")
+}
+
+func (*contextSuite) TestSetBackground(c *gc.C) {
+ var context Context
+ context.SetBackground(Blue)
+ c.Assert(context.sgr(), gc.Equals, "\x1b[44m")
+}
+
+func (*contextSuite) TestSetStyles(c *gc.C) {
+ var context Context
+ context.SetStyle(Bold, Italic)
+ c.Assert(context.sgr(), gc.Equals, "\x1b[1;3m")
+}
+
+func (s *contextSuite) TestFprintfNoColor(c *gc.C) {
+ buff, writer := s.newWriter()
+ writer.noColor = true
+
+ context := Context{
+ Foreground: Yellow,
+ Background: Blue,
+ Styles: []Style{Bold, Italic},
+ }
+
+ context.Fprintf(writer, "hello %s, %d", "world", 42)
+ c.Assert(buff.String(), gc.Equals, "hello world, 42")
+}
+
+func (s *contextSuite) TestFprintfColor(c *gc.C) {
+ buff, writer := s.newWriter()
+
+ context := Context{
+ Foreground: Yellow,
+ Background: Blue,
+ Styles: []Style{Bold, Italic},
+ }
+
+ context.Fprintf(writer, "hello %s, %d", "world", 42)
+ c.Assert(buff.String(), gc.Equals, "\x1b[1;3;33;44mhello world, 42\x1b[0m")
+}
diff --git a/vendor/github.com/juju/ansiterm/doc.go b/vendor/github.com/juju/ansiterm/doc.go
new file mode 100644
index 0000000..7827007
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package ansiterm provides a Writer that writes out the ANSI escape
+// codes for color and styles.
+package ansiterm
diff --git a/vendor/github.com/juju/ansiterm/package_test.go b/vendor/github.com/juju/ansiterm/package_test.go
new file mode 100644
index 0000000..fb15919
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/package_test.go
@@ -0,0 +1,14 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "testing"
+
+ gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ gc.TestingT(t)
+}
diff --git a/vendor/github.com/juju/ansiterm/style.go b/vendor/github.com/juju/ansiterm/style.go
new file mode 100644
index 0000000..0be42da
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/style.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+ _ Style = iota
+ Bold
+ Faint
+ Italic
+ Underline
+ Blink
+ Reverse
+ Strikethrough
+ Conceal
+)
+
+type Style int
+
+func (s Style) String() string {
+ switch s {
+ case Bold:
+ return "bold"
+ case Faint:
+ return "faint"
+ case Italic:
+ return "italic"
+ case Underline:
+ return "underline"
+ case Blink:
+ return "blink"
+ case Reverse:
+ return "reverse"
+ case Strikethrough:
+ return "strikethrough"
+ case Conceal:
+ return "conceal"
+ default:
+ return ""
+ }
+}
+
+func (s Style) enable() attribute {
+ switch s {
+ case Bold:
+ return 1
+ case Faint:
+ return 2
+ case Italic:
+ return 3
+ case Underline:
+ return 4
+ case Blink:
+ return 5
+ case Reverse:
+ return 7
+ case Conceal:
+ return 8
+ case Strikethrough:
+ return 9
+ default:
+ return unknownAttribute
+ }
+}
+
+func (s Style) disable() attribute {
+ value := s.enable()
+ if value != unknownAttribute {
+ return value + 20
+ }
+ return value
+}
diff --git a/vendor/github.com/juju/ansiterm/style_test.go b/vendor/github.com/juju/ansiterm/style_test.go
new file mode 100644
index 0000000..998a7ef
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/style_test.go
@@ -0,0 +1,21 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import gc "gopkg.in/check.v1"
+
+type styleSuite struct{}
+
+var _ = gc.Suite(&styleSuite{})
+
+func (*styleSuite) TestString(c *gc.C) {
+ c.Check(Bold.String(), gc.Equals, "bold")
+ c.Check(Strikethrough.String(), gc.Equals, "strikethrough")
+ var blank Style
+ c.Check(blank.String(), gc.Equals, "")
+ var huge Style = 1234
+ c.Check(huge.String(), gc.Equals, "")
+ var negative Style = -1
+ c.Check(negative.String(), gc.Equals, "")
+}
diff --git a/vendor/github.com/juju/ansiterm/tabwriter.go b/vendor/github.com/juju/ansiterm/tabwriter.go
new file mode 100644
index 0000000..1ff6faa
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/tabwriter.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "io"
+
+ "github.com/juju/ansiterm/tabwriter"
+)
+
+// NewTabWriter returns a writer that is able to set colors and styles.
+// The ansi escape codes are stripped for width calculations.
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+ return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
+
+// TabWriter is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// It also allows setting of colors and styles over and above the standard
+// tabwriter package.
+type TabWriter struct {
+ Writer
+ tw tabwriter.Writer
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (t *TabWriter) Flush() error {
+ return t.tw.Flush()
+}
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (t *TabWriter) SetColumnAlignRight(column int) {
+ t.tw.SetColumnAlignRight(column)
+}
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+// and cells are left-aligned independent of align_left
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+ writer, colorCapable := colorEnabledWriter(output)
+ t.Writer = Writer{
+ Writer: t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags),
+ noColor: !colorCapable,
+ }
+ return t
+}
diff --git a/vendor/github.com/juju/ansiterm/tabwriter/LICENSE b/vendor/github.com/juju/ansiterm/tabwriter/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/tabwriter/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
new file mode 100644
index 0000000..98949d0
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
@@ -0,0 +1,587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is mostly a copy of the go standard library text/tabwriter. With
+// the additional stripping of ansi control characters for width calculations.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package is using the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+ "bytes"
+ "io"
+ "unicode/utf8"
+
+ "github.com/lunixbochs/vtclean"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+ size int // cell size in bytes
+ width int // cell width in runes
+ htab bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+ // configuration
+ output io.Writer
+ minwidth int
+ tabwidth int
+ padding int
+ padbytes [8]byte
+ flags uint
+
+ // current state
+ buf bytes.Buffer // collected text excluding tabs or line breaks
+ pos int // buffer position up to which cell.width of incomplete cell has been computed
+ cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+ endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+ lines [][]cell // list of lines; each line is a list of cells
+ widths []int // list of column widths in runes - re-used during formatting
+ alignment map[int]uint // column alignment
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+ b.buf.Reset()
+ b.pos = 0
+ b.cell = cell{}
+ b.endChar = 0
+ b.lines = b.lines[0:0]
+ b.widths = b.widths[0:0]
+ b.alignment = make(map[int]uint)
+ b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+// (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+// position pos; html tags and entities are excluded from this width if html
+// filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+// which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+// formatting; it is kept in Writer because it's re-used
+//
+// |<---------- size ---------->|
+// | |
+// |<- width ->|<- ignored ->| |
+// | | | |
+// [---processed---tab------------......]
+// ^ ^ ^
+// | | |
+// buf start of incomplete cell pos
+
+// Formatting can be controlled with these flags.
+const (
+ // Ignore html tags and treat entities (starting with '&'
+ // and ending in ';') as single characters (width = 1).
+ FilterHTML uint = 1 << iota
+
+ // Strip Escape characters bracketing escaped text segments
+ // instead of passing them through unchanged with the text.
+ StripEscape
+
+ // Force right-alignment of cell content.
+ // Default is left-alignment.
+ AlignRight
+
+ // Handle empty columns as if they were not present in
+ // the input in the first place.
+ DiscardEmptyColumns
+
+ // Always use tabs for indentation columns (i.e., padding of
+ // leading empty cells on the left) independent of padchar.
+ TabIndent
+
+ // Print a vertical bar ('|') between columns (after formatting).
+ // Discarded columns appear as zero-width columns ("||").
+ Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+// and cells are left-aligned independent of align_left
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ if minwidth < 0 || tabwidth < 0 || padding < 0 {
+ panic("negative minwidth, tabwidth, or padding")
+ }
+ b.output = output
+ b.minwidth = minwidth
+ b.tabwidth = tabwidth
+ b.padding = padding
+ for i := range b.padbytes {
+ b.padbytes[i] = padchar
+ }
+ if padchar == '\t' {
+ // tab padding enforces left-alignment
+ flags &^= AlignRight
+ }
+ b.flags = flags
+
+ b.reset()
+
+ return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+ pos := 0
+ for i, line := range b.lines {
+ print("(", i, ") ")
+ for _, c := range line {
+ print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+ pos += c.size
+ }
+ print("\n")
+ }
+ print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+ err error
+}
+
+func (b *Writer) write0(buf []byte) {
+ n, err := b.output.Write(buf)
+ if n != len(buf) && err == nil {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ panic(osError{err})
+ }
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+ for n > len(src) {
+ b.write0(src)
+ n -= len(src)
+ }
+ b.write0(src[0:n])
+}
+
+var (
+ newline = []byte{'\n'}
+ tabs = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+ if b.padbytes[0] == '\t' || useTabs {
+ // padding is done with tabs
+ if b.tabwidth == 0 {
+ return // tabs have no width - can't do any padding
+ }
+ // make cellw the smallest multiple of b.tabwidth
+ cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+ n := cellw - textw // amount of padding
+ if n < 0 {
+ panic("internal error")
+ }
+ b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+ return
+ }
+
+ // padding is done with non-tab characters
+ b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ for i := line0; i < line1; i++ {
+ line := b.lines[i]
+
+ // if TabIndent is set, use tabs to pad leading empty cells
+ useTabs := b.flags&TabIndent != 0
+
+ for j, c := range line {
+ if j > 0 && b.flags&Debug != 0 {
+ // indicate column break
+ b.write0(vbar)
+ }
+
+ if c.size == 0 {
+ // empty cell
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], useTabs)
+ }
+ } else {
+ // non-empty cell
+ useTabs = false
+ alignColumnRight := b.alignment[j] == AlignRight
+ if (b.flags&AlignRight == 0) && !alignColumnRight { // align left
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ } else if alignColumnRight && j < len(b.widths) {
+ // just this column
+ internalSize := b.widths[j] - b.padding
+ if j < len(b.widths) {
+ b.writePadding(c.width, internalSize, false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ if b.padding > 0 {
+ b.writePadding(0, b.padding, false)
+ }
+ pos += c.size
+ } else { // align right
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ }
+ }
+ }
+
+ if i+1 == len(b.lines) {
+ // last buffered line - we don't have a newline, so just write
+ // any outstanding buffered data
+ b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+ pos += b.cell.size
+ } else {
+ // not the last line - write newline
+ b.write0(newline)
+ }
+ }
+ return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1 and an error, if any.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ column := len(b.widths)
+ for this := line0; this < line1; this++ {
+ line := b.lines[this]
+
+ if column < len(line)-1 {
+ // cell exists in this column => this line
+ // has more cells than the previous line
+ // (the last cell per line is ignored because cells are
+ // tab-terminated; the last cell per line describes the
+ // text before the newline/formfeed and does not belong
+ // to a column)
+
+ // print unprinted lines until beginning of block
+ pos = b.writeLines(pos, line0, this)
+ line0 = this
+
+ // column block begin
+ width := b.minwidth // minimal column width
+ discardable := true // true if all cells in this column are empty and "soft"
+ for ; this < line1; this++ {
+ line = b.lines[this]
+ if column < len(line)-1 {
+ // cell exists in this column
+ c := line[column]
+ // update width
+ if w := c.width + b.padding; w > width {
+ width = w
+ }
+ // update discardable
+ if c.width > 0 || c.htab {
+ discardable = false
+ }
+ } else {
+ break
+ }
+ }
+ // column block end
+
+ // discard empty columns if necessary
+ if discardable && b.flags&DiscardEmptyColumns != 0 {
+ width = 0
+ }
+
+ // format and print all columns to the right of this column
+ // (we know the widths of this column and all columns to the left)
+ b.widths = append(b.widths, width) // push width
+ pos = b.format(pos, line0, this)
+ b.widths = b.widths[0 : len(b.widths)-1] // pop width
+ line0 = this
+ }
+ }
+
+ // print unprinted lines until end
+ return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+ b.buf.Write(text)
+ b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+ // ---- Changes here -----
+ newChars := b.buf.Bytes()[b.pos:b.buf.Len()]
+ cleaned := vtclean.Clean(string(newChars), false) // false to strip colors
+ b.cell.width += utf8.RuneCount([]byte(cleaned))
+ // --- end of changes ----
+ b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+ switch ch {
+ case Escape:
+ b.endChar = Escape
+ case '<':
+ b.endChar = '>'
+ case '&':
+ b.endChar = ';'
+ }
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+ switch b.endChar {
+ case Escape:
+ b.updateWidth()
+ if b.flags&StripEscape == 0 {
+ b.cell.width -= 2 // don't count the Escape chars
+ }
+ case '>': // tag of zero width
+ case ';':
+ b.cell.width++ // entity, count as one rune
+ }
+ b.pos = b.buf.Len()
+ b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+ b.cell.htab = htab
+ line := &b.lines[len(b.lines)-1]
+ *line = append(*line, b.cell)
+ b.cell = cell{}
+ return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+ if e := recover(); e != nil {
+ if nerr, ok := e.(osError); ok {
+ *err = nerr.err
+ return
+ }
+ panic("tabwriter: panic during " + op)
+ }
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+ defer b.reset() // even in the presence of errors
+ defer handlePanic(&err, "Flush")
+
+ // add current cell if not empty
+ if b.cell.size > 0 {
+ if b.endChar != 0 {
+ // inside escape - terminate it even if incomplete
+ b.endEscape()
+ }
+ b.terminateCell(false)
+ }
+
+ // format contents of buffer
+ b.format(0, 0, len(b.lines))
+
+ return
+}
+
+var hbar = []byte("---\n")
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (b *Writer) SetColumnAlignRight(column int) {
+ b.alignment[column] = AlignRight
+}
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+ defer handlePanic(&err, "Write")
+
+ // split text into cells
+ n = 0
+ for i, ch := range buf {
+ if b.endChar == 0 {
+ // outside escape
+ switch ch {
+ case '\t', '\v', '\n', '\f':
+ // end of cell
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i + 1 // ch consumed
+ ncells := b.terminateCell(ch == '\t')
+ if ch == '\n' || ch == '\f' {
+ // terminate line
+ b.addLine()
+ if ch == '\f' || ncells == 1 {
+ // A '\f' always forces a flush. Otherwise, if the previous
+ // line has only one cell which does not have an impact on
+ // the formatting of the following lines (the last cell per
+ // line is ignored by format()), thus we can flush the
+ // Writer contents.
+ if err = b.Flush(); err != nil {
+ return
+ }
+ if ch == '\f' && b.flags&Debug != 0 {
+ // indicate section break
+ b.write0(hbar)
+ }
+ }
+ }
+
+ case Escape:
+ // start of escaped sequence
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ if b.flags&StripEscape != 0 {
+ n++ // strip Escape
+ }
+ b.startEscape(Escape)
+
+ case '<', '&':
+ // possibly an html tag/entity
+ if b.flags&FilterHTML != 0 {
+ // begin of tag/entity
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ b.startEscape(ch)
+ }
+ }
+
+ } else {
+ // inside escape
+ if ch == b.endChar {
+ // end of tag/entity
+ j := i + 1
+ if ch == Escape && b.flags&StripEscape != 0 {
+ j = i // strip Escape
+ }
+ b.append(buf[n:j])
+ n = i + 1 // ch consumed
+ b.endEscape()
+ }
+ }
+ }
+
+ // append leftover text
+ b.append(buf[n:])
+ n = len(buf)
+ return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
diff --git a/vendor/github.com/juju/ansiterm/tabwriter/tabwriter_test.go b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter_test.go
new file mode 100644
index 0000000..2066480
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter_test.go
@@ -0,0 +1,25 @@
+package tabwriter
+
+import (
+ "bytes"
+ "testing"
+
+ gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ gc.TestingT(t)
+}
+
+type tabwriterSuite struct{}
+
+var _ = gc.Suite(&tabwriterSuite{})
+
+func (s *tabwriterSuite) TestRightAlignOverflow(c *gc.C) {
+ var buf bytes.Buffer
+ tw := NewWriter(&buf, 0, 1, 2, ' ', 0)
+ tw.SetColumnAlignRight(2)
+ tw.Write([]byte("not\tenough\ttabs"))
+ tw.Flush()
+ c.Assert(buf.String(), gc.Equals, "not enough tabs")
+}
diff --git a/vendor/github.com/juju/ansiterm/terminal.go b/vendor/github.com/juju/ansiterm/terminal.go
new file mode 100644
index 0000000..96fd11c
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/terminal.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "io"
+ "os"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+// colorEnabledWriter returns a writer that can handle the ansi color codes
+// and true if the writer passed in is a terminal capable of color. If the
+// TERM environment variable is set to "dumb", the terminal is not considered
+// color capable.
+func colorEnabledWriter(w io.Writer) (io.Writer, bool) {
+ f, ok := w.(*os.File)
+ if !ok {
+ return w, false
+ }
+ // Check the TERM environment variable specifically
+ // to check for "dumb" terminals.
+ if os.Getenv("TERM") == "dumb" {
+ return w, false
+ }
+ if !isatty.IsTerminal(f.Fd()) {
+ return w, false
+ }
+ return colorable.NewColorable(f), true
+}
diff --git a/vendor/github.com/juju/ansiterm/writer.go b/vendor/github.com/juju/ansiterm/writer.go
new file mode 100644
index 0000000..32437bb
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/writer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "fmt"
+ "io"
+)
+
+// Writer allows colors and styles to be specified. If the io.Writer
+// is not a terminal capable of color, all attempts to set colors or
+// styles are no-ops.
+type Writer struct {
+ io.Writer
+
+ noColor bool
+}
+
+// NewWriter returns a Writer that allows the caller to specify colors and
+// styles. If the io.Writer is not a terminal capable of color, all attempts
+// to set colors or styles are no-ops.
+func NewWriter(w io.Writer) *Writer {
+ writer, colorCapable := colorEnabledWriter(w)
+ return &Writer{
+ Writer: writer,
+ noColor: !colorCapable,
+ }
+}
+
+// SetColorCapable forces the writer to either write the ANSI escape color
+// if capable is true, or to not write them if capable is false.
+func (w *Writer) SetColorCapable(capable bool) {
+ w.noColor = !capable
+}
+
+// SetForeground sets the foreground color.
+func (w *Writer) SetForeground(c Color) {
+ w.writeSGR(c.foreground())
+}
+
+// SetBackground sets the background color.
+func (w *Writer) SetBackground(c Color) {
+ w.writeSGR(c.background())
+}
+
+// SetStyle sets the text style.
+func (w *Writer) SetStyle(s Style) {
+ w.writeSGR(s.enable())
+}
+
+// ClearStyle clears the text style.
+func (w *Writer) ClearStyle(s Style) {
+ w.writeSGR(s.disable())
+}
+
+// Reset returns the default foreground and background colors with no styles.
+func (w *Writer) Reset() {
+ w.writeSGR(reset)
+}
+
+type sgr interface {
+ // sgr returns the combined escape sequence for the Select Graphic Rendition.
+ sgr() string
+}
+
+// writeSGR takes the appropriate integer SGR parameters
+// and writes out the ANSI escape code.
+func (w *Writer) writeSGR(value sgr) {
+ if w.noColor {
+ return
+ }
+ fmt.Fprint(w, value.sgr())
+}
diff --git a/vendor/github.com/juju/ansiterm/writer_test.go b/vendor/github.com/juju/ansiterm/writer_test.go
new file mode 100644
index 0000000..6832931
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/writer_test.go
@@ -0,0 +1,77 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "bytes"
+
+ gc "gopkg.in/check.v1"
+)
+
+type writerSuite struct{}
+
+var _ = gc.Suite(&writerSuite{})
+
+func (*writerSuite) TestNoColor(c *gc.C) {
+ buff := &bytes.Buffer{}
+ writer := NewWriter(buff)
+ c.Check(writer.noColor, gc.Equals, true)
+
+ writer.SetForeground(Yellow)
+ writer.SetBackground(Blue)
+ writer.SetStyle(Bold)
+ writer.ClearStyle(Bold)
+ writer.Reset()
+
+ c.Check(buff.String(), gc.Equals, "")
+}
+
+func (*writerSuite) TestSetColorCapable(c *gc.C) {
+ buff := &bytes.Buffer{}
+ writer := NewWriter(buff)
+ c.Check(writer.noColor, gc.Equals, true)
+
+ writer.SetColorCapable(true)
+ c.Check(writer.noColor, gc.Equals, false)
+
+ writer.SetColorCapable(false)
+ c.Check(writer.noColor, gc.Equals, true)
+}
+
+func (*writerSuite) newWriter() (*bytes.Buffer, *Writer) {
+ buff := &bytes.Buffer{}
+ writer := NewWriter(buff)
+ writer.noColor = false
+ return buff, writer
+}
+
+func (s *writerSuite) TestSetForeground(c *gc.C) {
+ buff, writer := s.newWriter()
+ writer.SetForeground(Yellow)
+ c.Check(buff.String(), gc.Equals, "\x1b[33m")
+}
+
+func (s *writerSuite) TestSetBackground(c *gc.C) {
+ buff, writer := s.newWriter()
+ writer.SetBackground(Blue)
+ c.Check(buff.String(), gc.Equals, "\x1b[44m")
+}
+
+func (s *writerSuite) TestSetStyle(c *gc.C) {
+ buff, writer := s.newWriter()
+ writer.SetStyle(Bold)
+ c.Check(buff.String(), gc.Equals, "\x1b[1m")
+}
+
+func (s *writerSuite) TestClearStyle(c *gc.C) {
+ buff, writer := s.newWriter()
+ writer.ClearStyle(Bold)
+ c.Check(buff.String(), gc.Equals, "\x1b[21m")
+}
+
+func (s *writerSuite) TestReset(c *gc.C) {
+ buff, writer := s.newWriter()
+ writer.Reset()
+ c.Check(buff.String(), gc.Equals, "\x1b[0m")
+}
diff --git a/vendor/github.com/kennygrant/sanitize/.gitignore b/vendor/github.com/kennygrant/sanitize/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/kennygrant/sanitize/.travis.yml b/vendor/github.com/kennygrant/sanitize/.travis.yml
new file mode 100644
index 0000000..4f2ee4d
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/kennygrant/sanitize/LICENSE b/vendor/github.com/kennygrant/sanitize/LICENSE
new file mode 100644
index 0000000..749ebb2
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 Mechanism Design. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/github.com/kennygrant/sanitize/README.md b/vendor/github.com/kennygrant/sanitize/README.md
new file mode 100644
index 0000000..4401ef7
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/README.md
@@ -0,0 +1,62 @@
+sanitize [![GoDoc](https://godoc.org/github.com/kennygrant/sanitize?status.svg)](https://godoc.org/github.com/kennygrant/sanitize) [![Go Report Card](https://goreportcard.com/badge/github.com/kennygrant/sanitize)](https://goreportcard.com/report/github.com/kennygrant/sanitize) [![CircleCI](https://circleci.com/gh/kennygrant/sanitize.svg?style=svg)](https://circleci.com/gh/kennygrant/sanitize)
+========
+
+Package sanitize provides functions to sanitize html and paths with go (golang).
+
+FUNCTIONS
+
+
+```go
+sanitize.Accents(s string) string
+```
+
+Accents replaces a set of accented characters with ascii equivalents.
+
+```go
+sanitize.BaseName(s string) string
+```
+
+BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -. Unlike Name no attempt is made to normalise text as a path.
+
+```go
+sanitize.HTML(s string) string
+```
+
+HTML strips html tags with a very simple parser, replace common entities, and escape < and > in the result. The result is intended to be used as plain text.
+
+```go
+sanitize.HTMLAllowing(s string, args...[]string) (string, error)
+```
+
+HTMLAllowing parses html and allow certain tags and attributes from the lists optionally specified by args - args[0] is a list of allowed tags, args[1] is a list of allowed attributes. If either is missing default sets are used.
+
+```go
+sanitize.Name(s string) string
+```
+
+Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters.
+
+```go
+sanitize.Path(s string) string
+```
+
+Path makes a string safe to use as an url path.
+
+
+Changes
+-------
+
+Version 1.2
+
+Adjusted HTML function to avoid linter warning
+Added more tests from https://githubengineering.com/githubs-post-csp-journey/
+Changed name of license file
+Added badges and change log to readme
+
+Version 1.1
+Fixed typo in comments.
+Merge pull request from Povilas Balzaravicius Pawka
+ - replace br tags with newline even when they contain a space
+
+Version 1.0
+First release
\ No newline at end of file
diff --git a/vendor/github.com/kennygrant/sanitize/sanitize.go b/vendor/github.com/kennygrant/sanitize/sanitize.go
new file mode 100755
index 0000000..2932209
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/sanitize.go
@@ -0,0 +1,388 @@
+// Package sanitize provides functions for sanitizing text.
+package sanitize
+
+import (
+ "bytes"
+ "html"
+ "html/template"
+ "io"
+ "path"
+ "regexp"
+ "strings"
+
+ parser "golang.org/x/net/html"
+)
+
+var (
+ ignoreTags = []string{"title", "script", "style", "iframe", "frame", "frameset", "noframes", "noembed", "embed", "applet", "object", "base"}
+
+ defaultTags = []string{"h1", "h2", "h3", "h4", "h5", "h6", "div", "span", "hr", "p", "br", "b", "i", "strong", "em", "ol", "ul", "li", "a", "img", "pre", "code", "blockquote", "article", "section"}
+
+ defaultAttributes = []string{"id", "class", "src", "href", "title", "alt", "name", "rel"}
+)
+
+// HTMLAllowing sanitizes html, allowing some tags.
+// Arrays of allowed tags and allowed attributes may optionally be passed as the second and third arguments.
+func HTMLAllowing(s string, args ...[]string) (string, error) {
+
+ allowedTags := defaultTags
+ if len(args) > 0 {
+ allowedTags = args[0]
+ }
+ allowedAttributes := defaultAttributes
+ if len(args) > 1 {
+ allowedAttributes = args[1]
+ }
+
+ // Parse the html
+ tokenizer := parser.NewTokenizer(strings.NewReader(s))
+
+ buffer := bytes.NewBufferString("")
+ ignore := ""
+
+ for {
+ tokenType := tokenizer.Next()
+ token := tokenizer.Token()
+
+ switch tokenType {
+
+ case parser.ErrorToken:
+ err := tokenizer.Err()
+ if err == io.EOF {
+ return buffer.String(), nil
+ }
+ return "", err
+
+ case parser.StartTagToken:
+
+ if len(ignore) == 0 && includes(allowedTags, token.Data) {
+ token.Attr = cleanAttributes(token.Attr, allowedAttributes)
+ buffer.WriteString(token.String())
+ } else if includes(ignoreTags, token.Data) {
+ ignore = token.Data
+ }
+
+ case parser.SelfClosingTagToken:
+
+ if len(ignore) == 0 && includes(allowedTags, token.Data) {
+ token.Attr = cleanAttributes(token.Attr, allowedAttributes)
+ buffer.WriteString(token.String())
+ } else if token.Data == ignore {
+ ignore = ""
+ }
+
+ case parser.EndTagToken:
+ if len(ignore) == 0 && includes(allowedTags, token.Data) {
+ token.Attr = []parser.Attribute{}
+ buffer.WriteString(token.String())
+ } else if token.Data == ignore {
+ ignore = ""
+ }
+
+ case parser.TextToken:
+ // We allow text content through, unless ignoring this entire tag and its contents (including other tags)
+ if ignore == "" {
+ buffer.WriteString(token.String())
+ }
+ case parser.CommentToken:
+ // We ignore comments by default
+ case parser.DoctypeToken:
+ // We ignore doctypes by default - html5 does not require them and this is intended for sanitizing snippets of text
+ default:
+ // We ignore unknown token types by default
+
+ }
+
+ }
+
+}
+
+// HTML strips html tags, replace common entities, and escapes <>&;'" in the result.
+// Note the returned text may contain entities as it is escaped by HTMLEscapeString, and most entities are not translated.
+func HTML(s string) (output string) {
+
+ // Shortcut strings with no tags in them
+ if !strings.ContainsAny(s, "<>") {
+ output = s
+ } else {
+
+ // First remove line breaks etc as these have no meaning outside html tags (except pre)
+ // this means pre sections will lose formatting... but will result in less unintentional paras.
+ s = strings.Replace(s, "\n", "", -1)
+
+ // Then replace line breaks with newlines, to preserve that formatting
+ s = strings.Replace(s, "", "\n", -1)
+ s = strings.Replace(s, " ", "\n", -1)
+ s = strings.Replace(s, "", "\n", -1)
+ s = strings.Replace(s, " ", "\n", -1)
+ s = strings.Replace(s, " ", "\n", -1)
+
+ // Walk through the string removing all tags
+ b := bytes.NewBufferString("")
+ inTag := false
+ for _, r := range s {
+ switch r {
+ case '<':
+ inTag = true
+ case '>':
+ inTag = false
+ default:
+ if !inTag {
+ b.WriteRune(r)
+ }
+ }
+ }
+ output = b.String()
+ }
+
+ // Remove a few common harmless entities, to arrive at something more like plain text
+ output = strings.Replace(output, "‘", "'", -1)
+ output = strings.Replace(output, "’", "'", -1)
+ output = strings.Replace(output, "“", "\"", -1)
+ output = strings.Replace(output, "”", "\"", -1)
+ output = strings.Replace(output, " ", " ", -1)
+ output = strings.Replace(output, """, "\"", -1)
+ output = strings.Replace(output, "'", "'", -1)
+
+ // Translate some entities into their plain text equivalent (for example accents, if encoded as entities)
+ output = html.UnescapeString(output)
+
+ // In case we have missed any tags above, escape the text - removes <, >, &, ' and ".
+ output = template.HTMLEscapeString(output)
+
+ // After processing, remove some harmless entities &, ' and " which are encoded by HTMLEscapeString
+ output = strings.Replace(output, """, "\"", -1)
+ output = strings.Replace(output, "'", "'", -1)
+ output = strings.Replace(output, "& ", "& ", -1) // NB space after
+ output = strings.Replace(output, "& ", "& ", -1) // NB space after
+
+ return output
+}
+
+// We are very restrictive as this is intended for ascii url slugs
+var illegalPath = regexp.MustCompile(`[^[:alnum:]\~\-\./]`)
+
+// Path makes a string safe to use as a URL path,
+// removing accents and replacing separators with -.
+// The path may still start at / and is not intended
+// for use as a file system path without prefix.
+func Path(s string) string {
+ // Start with lowercase string
+ filePath := strings.ToLower(s)
+ filePath = strings.Replace(filePath, "..", "", -1)
+ filePath = path.Clean(filePath)
+
+ // Remove illegal characters for paths, flattening accents
+ // and replacing some common separators with -
+ filePath = cleanString(filePath, illegalPath)
+
+ // NB this may be of length 0, caller must check
+ return filePath
+}
+
+// Remove all other unrecognised characters apart from
+var illegalName = regexp.MustCompile(`[^[:alnum:]-.]`)
+
+// Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters.
+func Name(s string) string {
+ // Start with lowercase string
+ fileName := strings.ToLower(s)
+ fileName = path.Clean(path.Base(fileName))
+
+ // Remove illegal characters for names, replacing some common separators with -
+ fileName = cleanString(fileName, illegalName)
+
+ // NB this may be of length 0, caller must check
+ return fileName
+}
+
+// Replace these separators with -
+var baseNameSeparators = regexp.MustCompile(`[./]`)
+
+// BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -.
+// No attempt is made to normalise a path or normalise case.
+func BaseName(s string) string {
+
+ // Replace certain joining characters with a dash
+ baseName := baseNameSeparators.ReplaceAllString(s, "-")
+
+ // Remove illegal characters for names, replacing some common separators with -
+ baseName = cleanString(baseName, illegalName)
+
+ // NB this may be of length 0, caller must check
+ return baseName
+}
+
+// A very limited list of transliterations to catch common european names translated to urls.
+// This set could be expanded with at least caps and many more characters.
+var transliterations = map[rune]string{
+ 'À': "A",
+ 'Á': "A",
+ 'Â': "A",
+ 'Ã': "A",
+ 'Ä': "A",
+ 'Å': "AA",
+ 'Æ': "AE",
+ 'Ç': "C",
+ 'È': "E",
+ 'É': "E",
+ 'Ê': "E",
+ 'Ë': "E",
+ 'Ì': "I",
+ 'Í': "I",
+ 'Î': "I",
+ 'Ï': "I",
+ 'Ð': "D",
+ 'Ł': "L",
+ 'Ñ': "N",
+ 'Ò': "O",
+ 'Ó': "O",
+ 'Ô': "O",
+ 'Õ': "O",
+ 'Ö': "OE",
+ 'Ø': "OE",
+ 'Œ': "OE",
+ 'Ù': "U",
+ 'Ú': "U",
+ 'Ü': "UE",
+ 'Û': "U",
+ 'Ý': "Y",
+ 'Þ': "TH",
+ 'ẞ': "SS",
+ 'à': "a",
+ 'á': "a",
+ 'â': "a",
+ 'ã': "a",
+ 'ä': "ae",
+ 'å': "aa",
+ 'æ': "ae",
+ 'ç': "c",
+ 'è': "e",
+ 'é': "e",
+ 'ê': "e",
+ 'ë': "e",
+ 'ì': "i",
+ 'í': "i",
+ 'î': "i",
+ 'ï': "i",
+ 'ð': "d",
+ 'ł': "l",
+ 'ñ': "n",
+ 'ń': "n",
+ 'ò': "o",
+ 'ó': "o",
+ 'ô': "o",
+ 'õ': "o",
+ 'ō': "o",
+ 'ö': "oe",
+ 'ø': "oe",
+ 'œ': "oe",
+ 'ś': "s",
+ 'ù': "u",
+ 'ú': "u",
+ 'û': "u",
+ 'ū': "u",
+ 'ü': "ue",
+ 'ý': "y",
+ 'ÿ': "y",
+ 'ż': "z",
+ 'þ': "th",
+ 'ß': "ss",
+}
+
+// Accents replaces a set of accented characters with ascii equivalents.
+func Accents(s string) string {
+ // Replace some common accent characters
+ b := bytes.NewBufferString("")
+ for _, c := range s {
+ // Check transliterations first
+ if val, ok := transliterations[c]; ok {
+ b.WriteString(val)
+ } else {
+ b.WriteRune(c)
+ }
+ }
+ return b.String()
+}
+
+var (
+ // If the attribute contains data: or javascript: anywhere, ignore it
+ // we don't allow this in attributes as it is so frequently used for xss
+ // NB we allow spaces in the value, and lowercase.
+ illegalAttr = regexp.MustCompile(`(d\s*a\s*t\s*a|j\s*a\s*v\s*a\s*s\s*c\s*r\s*i\s*p\s*t\s*)\s*:`)
+
+ // We are far more restrictive with href attributes.
+ legalHrefAttr = regexp.MustCompile(`\A[/#][^/\\]?|mailto:|http://|https://`)
+)
+
+// cleanAttributes returns an array of attributes after removing malicious ones.
+func cleanAttributes(a []parser.Attribute, allowed []string) []parser.Attribute {
+ if len(a) == 0 {
+ return a
+ }
+
+ var cleaned []parser.Attribute
+ for _, attr := range a {
+ if includes(allowed, attr.Key) {
+
+ val := strings.ToLower(attr.Val)
+
+ // Check for illegal attribute values
+ if illegalAttr.FindString(val) != "" {
+ attr.Val = ""
+ }
+
+ // Check for legal href values - / mailto:// http:// or https://
+ if attr.Key == "href" {
+ if legalHrefAttr.FindString(val) == "" {
+ attr.Val = ""
+ }
+ }
+
+ // If we still have an attribute, append it to the array
+ if attr.Val != "" {
+ cleaned = append(cleaned, attr)
+ }
+ }
+ }
+ return cleaned
+}
+
+// A list of characters we consider separators in normal strings and replace with our canonical separator - rather than removing.
+var (
+ separators = regexp.MustCompile(`[ &_=+:]`)
+
+ dashes = regexp.MustCompile(`[\-]+`)
+)
+
+// cleanString replaces separators with - and removes characters listed in the regexp provided from string.
+// Accents, spaces, and all characters not in A-Za-z0-9 are replaced.
+func cleanString(s string, r *regexp.Regexp) string {
+
+ // Remove any trailing space to avoid ending on -
+ s = strings.Trim(s, " ")
+
+ // Flatten accents first so that if we remove non-ascii we still get a legible name
+ s = Accents(s)
+
+ // Replace certain joining characters with a dash
+ s = separators.ReplaceAllString(s, "-")
+
+ // Remove all other unrecognised characters - NB we do allow any printable characters
+ s = r.ReplaceAllString(s, "")
+
+ // Remove any multiple dashes caused by replacements above
+ s = dashes.ReplaceAllString(s, "-")
+
+ return s
+}
+
+// includes checks for inclusion of a string in a []string.
+func includes(a []string, s string) bool {
+ for _, as := range a {
+ if as == s {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/kennygrant/sanitize/sanitize_test.go b/vendor/github.com/kennygrant/sanitize/sanitize_test.go
new file mode 100644
index 0000000..a2242dc
--- /dev/null
+++ b/vendor/github.com/kennygrant/sanitize/sanitize_test.go
@@ -0,0 +1,236 @@
+// Utility functions for working with text
+package sanitize
+
+import (
+ "testing"
+)
+
+var Format = "\ninput: %q\nexpected: %q\noutput: %q"
+
+type Test struct {
+ input string
+ expected string
+}
+
+// NB the treatment of accents - they are removed and replaced with ascii transliterations
+var urls = []Test{
+ {"ReAd ME.md", `read-me.md`},
+ {"E88E08A7-279C-4CC1-8B90-86DE0D7044_3C.html", `e88e08a7-279c-4cc1-8b90-86de0d7044-3c.html`},
+ {"/user/test/I am a long url's_-?ASDF@£$%£%^testé.html", `/user/test/i-am-a-long-urls-asdfteste.html`},
+ {"/../../4-icon.jpg", `/4-icon.jpg`},
+ {"/Images_dir/../4-icon.jpg", `/images-dir/4-icon.jpg`},
+ {"../4 icon.*", `/4-icon.`},
+ {"Spac ey/Nôm/test før url", `spac-ey/nom/test-foer-url`},
+ {"../*", `/`},
+}
+
+func TestPath(t *testing.T) {
+ for _, test := range urls {
+ output := Path(test.input)
+ if output != test.expected {
+ t.Fatalf(Format, test.input, test.expected, output)
+ }
+ }
+}
+
+func BenchmarkPath(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for _, test := range urls {
+ output := Path(test.input)
+ if output != test.expected {
+ b.Fatalf(Format, test.input, test.expected, output)
+ }
+ }
+ }
+}
+
+var fileNames = []Test{
+ {"ReAd ME.md", `read-me.md`},
+ {"/var/etc/jobs/go/go/src/pkg/foo/bar.go", `bar.go`},
+ {"I am a long url's_-?ASDF@£$%£%^é.html", `i-am-a-long-urls-asdfe.html`},
+ {"/../../4-icon.jpg", `4-icon.jpg`},
+ {"/Images/../4-icon.jpg", `4-icon.jpg`},
+ {"../4 icon.jpg", `4-icon.jpg`},
+ {"../4 icon-testé *8%^\"'\".jpg ", `4-icon-teste-8.jpg`},
+ {"Überfluß an Döner macht schöner.JPEG", `ueberfluss-an-doener-macht-schoener.jpeg`},
+ {"Ä-_-Ü_:()_Ö-_-ä-_-ü-_-ö-_ß.webm", `ae-ue-oe-ae-ue-oe-ss.webm`},
+}
+
+func TestName(t *testing.T) {
+ for _, test := range fileNames {
+ output := Name(test.input)
+ if output != test.expected {
+ t.Fatalf(Format, test.input, test.expected, output)
+ }
+ }
+}
+
+func BenchmarkName(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for _, test := range fileNames {
+ output := Name(test.input)
+ if output != test.expected {
+ b.Fatalf(Format, test.input, test.expected, output)
+ }
+ }
+ }
+}
+
+var baseFileNames = []Test{
+ {"The power & the Glory jpg file. The end", `The-power-the-Glory-jpg-file-The-end`},
+ {"/../../4-iCoN.jpg", `-4-iCoN-jpg`},
+ {"And/Or", `And-Or`},
+ {"Sonic.EXE", `Sonic-EXE`},
+ {"012: #Fetch for Defaults", `012-Fetch-for-Defaults`},
+}
+
+func TestBaseName(t *testing.T) {
+ for _, test := range baseFileNames {
+ output := BaseName(test.input)
+ if output != test.expected {
+ t.Fatalf(Format, test.input, test.expected, output)
+ }
+ }
+}
+
+// Test with some malformed or malicious html
+// NB because we remove all tokens after a < until the next >
+// and do not attempt to parse, we should be safe from invalid html,
+// but will sometimes completely empty the string if we have invalid input
+// Note we sometimes use " in order to keep things on one line and use the ` character
+var htmlTests = []Test{
+ {` `, " "},
+ {`
`, `
`},
+ {`
`, ``},
+ {"
Bold Not bold
\nAlso not bold.", "Bold Not bold\nAlso not bold."},
+ {`FOO
ZOO`, "FOO\rZOO"},
+ {`">`, `alert("XSS")"`},
+ {``, ``},
+ {`