WIP > Refactor the retry logic
Instead of calling `break`/`continue` in the `for` loop directly, declare a `shouldRetry`
variable and switch on its value at the end of the loop.

With compliments to @honzakral.
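
The gist of the change, as a self-contained sketch (the doRequest helper, its error, and the retry count are made up for illustration; this is not the client's actual Perform body):

package main

import (
    "errors"
    "fmt"
)

const maxRetries = 3

// doRequest stands in for the real round trip: it fails on the first
// two attempts and succeeds on the third.
func doRequest(attempt int) error {
    if attempt < 3 {
        return errors.New("connection refused")
    }
    return nil
}

func main() {
    for i := 1; i <= maxRetries; i++ {
        // Every branch only records its decision in shouldRetry;
        // the loop is left from a single place at the end of the body
        // instead of calling break or continue inside the branches.
        var shouldRetry bool

        if err := doRequest(i); err != nil {
            shouldRetry = true
        }

        fmt.Printf("attempt %d, retry=%v\n", i, shouldRetry)

        if !shouldRetry {
            break
        }
    }
}

Keeping the exit in one place makes it easier to combine several retry criteria (network errors, response status, exhausted attempts) without tracking which branch needs a break and which a continue.
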
karmi committed Jul 30, 2019
1 parent 0201be2 commit 656fe3c
Showing 2 changed files with 20 additions and 63 deletions.
48 changes: 20 additions & 28 deletions estransport/estransport.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
@@ -92,7 +93,6 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) {

dupReqBodyForLog io.ReadCloser

// TODO(karmi): Make dynamic based on number of URLs
maxRetries = defaultMaxRetries
)

@@ -113,10 +113,11 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) {
//
c.setUserAgent(req)

for i := 1; ; i++ {
var nodeURL *url.URL

// fmt.Printf("\nPerform: attempt %d\n", i)
for i := 1; i <= maxRetries; i++ {
var (
nodeURL *url.URL
shouldRetry bool
)

// Get URL from the Selector
//
@@ -151,45 +152,35 @@ func (c *Client) Perform(req *http.Request) (*http.Response, error) {
c.logRoundTrip(req, res, dupReqBodyForLog, err, start, dur)
}

// Break when there's only a single node
//
// TODO(karmi): Not if c.RetryOnSingleNode (ie. retry on Elasticsearch Service)
// Retry only on network errors, but don't retry on timeout error
//
if len(c.URLs()) < 2 {
// TODO(karmi): Add Len() to selector
break
if err != nil {
if err, ok := err.(net.Error); ok {
if !err.Timeout() {
shouldRetry = true
}
} else {
shouldRetry = true
}
}

// Continue on specific 5xx response errors
// Retry on specific 5xx response errors
//
// TODO(karmi): Only if c.RetryOnStatus != false
//
if res != nil {
var shouldRetry bool
for _, code := range defaultRetryOnStatus {
if res != nil && res.StatusCode == code {
if res.StatusCode == code {
shouldRetry = true
}
}
if shouldRetry {
continue
}
}

// Break if there's no error
//
if err == nil {
break
}

// Break if retries have been exhausted
// Break if retry should not be performed
//
if i >= maxRetries {
// fmt.Printf("Perform: aborting after %d attempts\n", i)
if !shouldRetry {
break
}

// TODO(karmi): If c.DisableRetryOnError => break
}

// TODO(karmi): Wrap error
@@ -206,6 +197,7 @@ func (c *Client) getURL() (*url.URL, error) {
return c.selector.Select()
}

// TODO(karmi): Rename to setReqURL()
func (c *Client) setURL(u *url.URL, req *http.Request) *http.Request {
req.URL.Scheme = u.Scheme
req.URL.Host = u.Host
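
For reference, the retry decision in the new loop can be read as a single predicate: retry on any error except a network timeout, or on a response whose status is in the retry list. A standalone sketch under that reading (the function name and the status codes are illustrative, not the client's API; the real code checks defaultRetryOnStatus):

package main

import (
    "fmt"
    "net"
    "net/http"
)

// retryOnStatus plays the role of defaultRetryOnStatus in the real
// transport; the exact codes listed here are only an example.
var retryOnStatus = []int{502, 503, 504}

// shouldRetry reports whether another attempt should be made: network
// errors that are not timeouts, any other error, and responses with a
// retryable status code all qualify.
func shouldRetry(res *http.Response, err error) bool {
    if err != nil {
        if netErr, ok := err.(net.Error); ok {
            return !netErr.Timeout() // never retry on timeouts
        }
        return true
    }
    for _, code := range retryOnStatus {
        if res != nil && res.StatusCode == code {
            return true
        }
    }
    return false
}

func main() {
    timeoutErr := &net.DNSError{IsTimeout: true}
    fmt.Println(shouldRetry(nil, timeoutErr))                      // false: timeout
    fmt.Println(shouldRetry(nil, fmt.Errorf("connection reset")))  // true: non-timeout error
    fmt.Println(shouldRetry(&http.Response{StatusCode: 503}, nil)) // true: retryable status
    fmt.Println(shouldRetry(&http.Response{StatusCode: 200}, nil)) // false: success
}
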
35 changes: 0 additions & 35 deletions estransport/estransport_internal_test.go
@@ -284,41 +284,6 @@ func TestTransportPerformRetries(t *testing.T) {
t.Errorf("Unexpected number of requests, want=%d, got=%d", numReqs, i)
}
})

t.Run("Don't retry for single URL", func(t *testing.T) {
var (
i int
numReqs = 1
)

u, _ := url.Parse("http://foo.bar")
tp := New(Config{
URLs: []*url.URL{u},
Transport: &mockTransp{
RoundTripFunc: func(req *http.Request) (*http.Response, error) {
i++
fmt.Printf("Request #%d", i)
fmt.Print(": ERR\n")
return nil, fmt.Errorf("Mock error (%d)", i)
},
}})

req, _ := http.NewRequest("GET", "/abc", nil)

res, err := tp.Perform(req)

if err == nil {
t.Fatalf("Expected error, got: %v", err)
}

if res != nil {
t.Errorf("Unexpected response: %+v", res)
}

if i != numReqs {
t.Errorf("Unexpected number of requests, want=%d, got=%d", numReqs, i)
}
})
}

func TestTransportSelector(t *testing.T) {

