Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixed len computation when size just goes beyond 14 bits #668

Merged
merged 12 commits into from
May 16, 2018
23 changes: 14 additions & 9 deletions compress_generate.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

129 changes: 129 additions & 0 deletions length_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"encoding/hex"
"fmt"
"net"
"strings"
"testing"
)

Expand Down Expand Up @@ -79,6 +80,92 @@ func TestMsgLength(t *testing.T) {
}
}

// TestCompressionLenHelper checks that compressionLenHelper only records name
// suffixes whose first label byte falls inside the 14-bit compression pointer
// range (maxCompressionOffset), and skips suffixes that start beyond it.
func TestCompressionLenHelper(t *testing.T) {
	// check reports a failure exactly like the original inline comparisons:
	// a missing key reads as 0, and mismatches emit the observed value.
	check := func(c map[string]int, name string, want int) {
		if c[name] != want {
			t.Errorf("bad %d", c[name])
		}
	}

	c := make(map[string]int)
	compressionLenHelper(c, "example.com", 12)
	check(c, "example.com", 11)
	check(c, "com", 3)

	// Test boundaries
	c = make(map[string]int)
	// foo label starts at 16379
	// com label starts at 16384
	compressionLenHelper(c, "foo.com", 16379)
	check(c, "foo.com", 7)
	// com label is accessible
	check(c, "com", 3)

	c = make(map[string]int)
	// foo label starts at 16379
	// com label starts at 16385 => outside range
	compressionLenHelper(c, "foo.com", 16380)
	check(c, "foo.com", 7)
	// com label is NOT accessible
	check(c, "com", 0)

	c = make(map[string]int)
	compressionLenHelper(c, "example.com", 16375)
	check(c, "example.com", 11)
	// com starts AFTER 16384
	check(c, "com", 3)

	c = make(map[string]int)
	compressionLenHelper(c, "example.com", 16376)
	check(c, "example.com", 11)
	// com starts AFTER 16384
	check(c, "com", 0)
}

// TestCompressionLenSearch verifies that compressionLenSearch returns the
// length of the longest suffix match found in the compression map, whether a
// match was found at all, and the payload size the uncompressed remainder of
// the name would occupy.
//
// Defect fixed: the original used panic(fmt.Errorf(...)) on mismatch, which
// aborts the whole test binary instead of failing this test; t.Fatalf is the
// testing-package idiom for "stop this test now".
func TestCompressionLenSearch(t *testing.T) {
	c := make(map[string]int)
	// Empty map: nothing compresses, the whole name counts toward the payload.
	compressed, ok, fullSize := compressionLenSearch(c, "a.b.org.")
	if compressed != 0 || ok || fullSize != 14 {
		t.Fatalf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)
	}
	c["org."] = 3
	compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
	if compressed != 4 || !ok || fullSize != 8 {
		t.Fatalf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)
	}
	c["b.org."] = 5
	compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
	if compressed != 6 || !ok || fullSize != 4 {
		t.Fatalf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)
	}
	// Not found long compression: "x.b.org." is not a suffix of "a.b.org.",
	// so the best match is still "b.org.".
	c["x.b.org."] = 5
	compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
	if compressed != 6 || !ok || fullSize != 4 {
		t.Fatalf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)
	}
	// Found long compression: an exact full-name match leaves nothing to add.
	c["a.b.org."] = 5
	compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
	if compressed != 8 || !ok || fullSize != 0 {
		t.Fatalf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize)
	}
}

func TestMsgLength2(t *testing.T) {
// Serialized replies
var testMessages = []string{
Expand Down Expand Up @@ -172,3 +259,45 @@ func TestMsgCompressLengthLargeRecords(t *testing.T) {
t.Fatalf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
}
}

// TestMsgCompressLengthLargeRecordsWithPaddingPermutation ensures Msg.Len's
// compressed-size prediction stays exact while a question label of varying
// width shifts every subsequent name across compression-pointer boundaries.
//
// Defect fixed: on Pack() failure the original called t.Error and then fell
// through to len(buf), comparing against a buffer from a failed pack; use
// t.Fatal so the invalid buffer is never inspected.
func TestMsgCompressLengthLargeRecordsWithPaddingPermutation(t *testing.T) {
	msg := new(Msg)
	msg.Compress = true
	msg.SetQuestion("my.service.acme.", TypeSRV)

	for i := 0; i < 250; i++ {
		target := fmt.Sprintf("host-redis-x-%d.test.acme.com.node.dc1.consul.", i)
		msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
		msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: 1, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.x.%d.", i)})
	}
	// DNS labels are at most 63 octets (RFC 1035), hence labelSize < 63.
	for labelSize := 1; labelSize < 63; labelSize++ {
		msg.SetQuestion(fmt.Sprintf("my.%s.service.acme.", strings.Repeat("x", labelSize)), TypeSRV)
		predicted := msg.Len()
		buf, err := msg.Pack()
		if err != nil {
			// Fatal: buf is meaningless after a failed pack.
			t.Fatal(err)
		}
		if predicted != len(buf) {
			t.Fatalf("padding= %d ; predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", labelSize, msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
		}
	}
}

// TestMsgCompressLengthLargeRecordsAllValues checks the predicted compressed
// length against the real packed size after every single record is appended,
// exercising message sizes on both sides of the 14-bit compression offset.
//
// Defect fixed: on Pack() failure the original called t.Error and then read
// len(buf) from the failed pack; use t.Fatal so the bad buffer is never used.
func TestMsgCompressLengthLargeRecordsAllValues(t *testing.T) {
	msg := new(Msg)
	msg.Compress = true
	msg.SetQuestion("redis.service.consul.", TypeSRV)
	for i := 0; i < 900; i++ {
		target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256)
		msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
		msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: 1, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)})
		// Re-check the prediction after each append so the first record count
		// that diverges is reported.
		predicted := msg.Len()
		buf, err := msg.Pack()
		if err != nil {
			// Fatal: buf is meaningless after a failed pack.
			t.Fatal(err)
		}
		if predicted != len(buf) {
			t.Fatalf("predicted compressed length is wrong for %d records: predicted %s (len=%d) %d, actual %d", i, msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
		}
	}
}
76 changes: 47 additions & 29 deletions msg.go
Original file line number Diff line number Diff line change
Expand Up @@ -931,7 +931,7 @@ func compressedLen(dns *Msg, compress bool) int {
compression := map[string]int{}
for _, r := range dns.Question {
l += r.len()
compressionLenHelper(compression, r.Name)
compressionLenHelper(compression, r.Name, l)
}
l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns)
Expand Down Expand Up @@ -962,70 +962,88 @@ func compressedLen(dns *Msg, compress bool) int {
return l
}

func compressionLenSlice(len int, c map[string]int, rs []RR) int {
var l int
func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
initLen := lenp
for _, r := range rs {
if r == nil {
continue
}
// track this length, and the global length in len, while taking compression into account for both.
// tmpLen tracks the length of the record at 14-bit boundaries; 6 bytes is the raw overhead
tmpLen := lenp + 6
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I still have trouble understanding where this +6 comes from; what is it? What bytes of the message are we skipping over here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Don't have access to the code easily from my phone, but I would say the signaling of a new record? (I am not at all an expert in DNS, I just read RFC part related to compression for this patch)

x := r.len()
l += x
len += x

k, ok := compressionLenSearch(c, r.Header().Name)
// track this length, and the global length in len, while taking compression into account for both.
k, ok, sz := compressionLenSearch(c, r.Header().Name)
if ok {
l += 1 - k
len += 1 - k
// Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptors take 2 bytes
// so, basically x:= x - k - 1 + 2
x += 1 - k
}
tmpLen += sz

if len < maxCompressionOffset {
compressionLenHelper(c, r.Header().Name)
}

k, ok = compressionLenSearchType(c, r)
tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
k, ok, added := compressionLenSearchType(c, r)
if ok {
l += 1 - k
len += 1 - k
}

if len < maxCompressionOffset {
compressionLenHelperType(c, r)
x += 1 - k
}
tmpLen += added
tmpLen += compressionLenHelperType(c, r, tmpLen)
lenp += x
}
return l
return lenp - initLen
}

// Put the parts of the name in the compression map.
func compressionLenHelper(c map[string]int, s string) {
// Put the parts of the name in the compression map, return the size in bytes added in payload
func compressionLenHelper(c map[string]int, s string, currentLen int) int {
if currentLen > maxCompressionOffset {
// We won't be able to add any label that could be re-used later anyway
return 0
}
if _, ok := c[s]; ok {
return 0
}
addedSizeBytes := 0
pref := ""
lbs := Split(s)
for j := len(lbs) - 1; j >= 0; j-- {
pref = s[lbs[j]:]
if _, ok := c[pref]; !ok {
c[pref] = len(pref)
lenAdded := len(pref)
numLabelsBefore := len(lbs) - j - 1
// 2 bytes per label before this label to take into account
offsetOfLabel := currentLen + len(s) - lenAdded + numLabelsBefore*2
// If the first byte of the label is within the first 14 bits, it might be re-used later
if offsetOfLabel < maxCompressionOffset {
c[pref] = lenAdded
addedSizeBytes += 2 + lenAdded
}
}
}
return addedSizeBytes
}

// Look for each part in the compression map and returns its length,
// keep on searching so we get the longest match.
func compressionLenSearch(c map[string]int, s string) (int, bool) {
// Will return the size of compression found, whether a match has been
// found and the size of record if added in payload
func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
off := 0
end := false
if s == "" { // don't bork on bogus data
return 0, false
return 0, false, 0
}
fullSize := 0
for {
if _, ok := c[s[off:]]; ok {
return len(s[off:]), true
return len(s[off:]), true, fullSize + off
}
if end {
break
}
// Each label descriptor takes 2 bytes, add it
fullSize += 2
off, end = NextLabel(s, off)
}
return 0, false
return 0, false, fullSize + len(s)
}

// Copy returns a new RR which is a deep-copy of r.
Expand Down
Loading