This repository has been archived by the owner on Aug 13, 2019. It is now read-only.

Merge pull request #307 from mjtrangoni/fixes
Fix some megacheck and unconvert issues

(megacheck bundles the staticcheck, gosimple, and unused Go linters; unconvert reports type conversions applied to values that already have the target type.)
gouthamve authored Apr 3, 2018
2 parents 2f37e1e + e5dabad commit 8a301b1
Showing 7 changed files with 19 additions and 21 deletions.
block.go (2 changes: 1 addition & 1 deletion)

@@ -474,7 +474,7 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (bool, error) {
 	numStones := 0

 	pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
-		for _ = range ivs {
+		for range ivs {
 			numStones++
 		}

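This is the simplification gosimple (part of megacheck) reports as S1005: a range loop that uses neither index nor value does not need the blank identifier. A minimal standalone sketch of the counting pattern used by CleanTombstones:

package main

import "fmt"

func main() {
	ivs := []int{1, 2, 3} // stand-in for the Intervals value in block.go

	numStones := 0
	// Before: for _ = range ivs { ... }; the blank identifier is redundant.
	for range ivs {
		numStones++
	}
	fmt.Println(numStones) // 3
}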
chunkenc/xor.go (2 changes: 1 addition & 1 deletion)

@@ -259,7 +259,7 @@ func (it *xorIterator) Next() bool {
 		it.err = err
 		return false
 	}
-	it.t = int64(t)
+	it.t = t
 	it.val = math.Float64frombits(v)

 	it.numRead++
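This is the kind of no-op conversion the unconvert linter reports: t is evidently already an int64, so int64(t) converts a value to its own type. A minimal sketch with illustrative types (only the field and variable names mirror the hunk):

package main

import "fmt"

type xorIterator struct {
	t int64
}

func main() {
	var t int64 = 1522752000 // already int64, e.g. a decoded timestamp
	it := &xorIterator{}

	// Before: it.t = int64(t), a conversion to the type t already has.
	it.t = t
	fmt.Println(it.t)
}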
chunks/chunks.go (4 changes: 2 additions & 2 deletions)

@@ -133,7 +133,7 @@ func (w *Writer) finalizeTail() error {
 		return err
 	}
 	// As the file was pre-allocated, we truncate any superfluous zero bytes.
-	off, err := tf.Seek(0, os.SEEK_CUR)
+	off, err := tf.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return err
 	}
@@ -349,7 +349,7 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
 	}
 	b := s.bs[seq]

-	if int(off) >= b.Len() {
+	if off >= b.Len() {
 		return nil, errors.Errorf("offset %d beyond data size %d", off, b.Len())
 	}
 	// With the minimum chunk length this should never cause us reading
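Two different lint findings here: the second hunk removes another no-op conversion (off is evidently already an int), and the first swaps the deprecated os.SEEK_CUR for io.SeekCurrent. The io.SeekStart, io.SeekCurrent, and io.SeekEnd constants were added in Go 1.7, and staticcheck reports uses of deprecated identifiers as SA1019. A small self-contained sketch of the "tell" idiom in the first hunk (the temp file is just for the demo):

package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seek-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}

	// Seeking zero bytes from io.SeekCurrent moves nothing and returns the
	// current offset, i.e. a portable "tell".
	off, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("offset:", off) // 5
}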
db.go (7 changes: 2 additions & 5 deletions)

@@ -725,10 +725,7 @@ func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error {
 	g.Go(func() error {
 		return db.head.Delete(mint, maxt, ms...)
 	})
-	if err := g.Wait(); err != nil {
-		return err
-	}
-	return nil
+	return g.Wait()
 }

 // CleanTombstones re-writes any blocks with tombstones.
@@ -737,7 +734,7 @@ func (db *DB) CleanTombstones() error {
 	defer db.cmtx.Unlock()

 	start := time.Now()
-	defer db.metrics.tombCleanTimer.Observe(float64(time.Since(start).Seconds()))
+	defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())

 	db.mtx.RLock()
 	blocks := db.blocks[:]
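Both hunks are simplifications: time.Duration's Seconds() method already returns a float64, so the float64(...) wrapper was a no-op, and an error check that only forwards the error collapses to returning the call directly. A hedged sketch of the errgroup shape (the per-goroutine work is invented for the demo):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func deleteAll(ids []int) error {
	var g errgroup.Group
	for _, id := range ids {
		id := id // capture a per-iteration copy (needed before Go 1.22)
		g.Go(func() error {
			fmt.Println("deleting", id) // placeholder for real work
			return nil
		})
	}
	// Before:
	//   if err := g.Wait(); err != nil {
	//       return err
	//   }
	//   return nil
	return g.Wait()
}

func main() {
	if err := deleteAll([]int{1, 2, 3}); err != nil {
		fmt.Println("error:", err)
	}
}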
index/index.go (8 changes: 4 additions & 4 deletions)

@@ -780,7 +780,7 @@ func (r *Reader) readSymbols(off int) error {

 	for d.err() == nil && d.len() > 0 && cnt > 0 {
 		s := d.uvarintStr()
-		r.symbols[uint32(nextPos)] = s
+		r.symbols[nextPos] = s

 		if r.version == 2 {
 			nextPos++
@@ -800,7 +800,7 @@ func (r *Reader) readOffsetTable(off uint64, f func([]string, uint64) error) err
 	cnt := d.be32()

 	for d.err() == nil && d.len() > 0 && cnt > 0 {
-		keyCount := int(d.uvarint())
+		keyCount := d.uvarint()
 		keys := make([]string, 0, keyCount)

 		for i := 0; i < keyCount; i++ {
@@ -1038,7 +1038,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e

 	d := decbuf{b: b}

-	k := int(d.uvarint())
+	k := d.uvarint()

 	for i := 0; i < k; i++ {
 		lno := uint32(d.uvarint())
@@ -1061,7 +1061,7 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e
 	}

 	// Read the chunks meta data.
-	k = int(d.uvarint())
+	k = d.uvarint()

 	if k == 0 {
 		return nil
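All four hunks drop int(...) and uint32(...) wrappers around values that already have those types; tsdb's internal decbuf evidently returns int from uvarint(), making the conversions no-ops. For contrast, a standalone sketch of the same count-then-loop decoding with the standard library, where a conversion genuinely is required because binary.Uvarint returns uint64:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode a count followed by that many values.
	buf := binary.AppendUvarint(nil, 3)
	for _, v := range []uint64{10, 20, 30} {
		buf = binary.AppendUvarint(buf, v)
	}

	// Decode: read the count once, then loop, mirroring k := d.uvarint()
	// above. Here int(u) is not redundant: Uvarint returns a uint64.
	u, n := binary.Uvarint(buf)
	buf = buf[n:]
	k := int(u)
	for i := 0; i < k; i++ {
		v, m := binary.Uvarint(buf)
		buf = buf[m:]
		fmt.Println(v)
	}
}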
wal.go (8 changes: 4 additions & 4 deletions)

@@ -290,7 +290,7 @@ func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error {
 	w.files = w.files[:file+1]

 	// Seek the current file to the last valid offset where we continue writing from.
-	_, err = w.files[file].Seek(lastOffset, os.SEEK_SET)
+	_, err = w.files[file].Seek(lastOffset, io.SeekStart)
 	return err
 }

@@ -393,7 +393,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
 		return errors.Wrap(r.Err(), "read candidate WAL files")
 	}

-	off, err := csf.Seek(0, os.SEEK_CUR)
+	off, err := csf.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return err
 	}
@@ -583,7 +583,7 @@ func (w *SegmentWAL) cut() error {
 	// in the new segment.
 	go func() {
 		w.actorc <- func() error {
-			off, err := hf.Seek(0, os.SEEK_CUR)
+			off, err := hf.Seek(0, io.SeekCurrent)
 			if err != nil {
 				return errors.Wrapf(err, "finish old segment %s", hf.Name())
 			}
@@ -1024,7 +1024,7 @@ func (r *walReader) next() bool {

 	// Remember the offset after the last correctly read entry. If the next one
 	// is corrupted, this is where we can safely truncate.
-	r.lastOffset, r.err = cf.Seek(0, os.SEEK_CUR)
+	r.lastOffset, r.err = cf.Seek(0, io.SeekCurrent)
 	if r.err != nil {
 		return false
 	}
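These four replacements are mechanical and behavior-preserving: the deprecated os.SEEK_* constants share values with their io counterparts, so only the spelling changes. A quick sketch verifying that claim:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// The os spellings are kept here deliberately to show the equivalence;
	// real code should use the io constants.
	fmt.Println(os.SEEK_SET == io.SeekStart)   // true
	fmt.Println(os.SEEK_CUR == io.SeekCurrent) // true
	fmt.Println(os.SEEK_END == io.SeekEnd)     // true
}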
wal_test.go (9 changes: 5 additions & 4 deletions)

@@ -15,6 +15,7 @@ package tsdb

 import (
 	"encoding/binary"
+	"io"
 	"io/ioutil"
 	"math/rand"
 	"os"
@@ -305,7 +306,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 	testutil.Ok(t, err)
 	defer f.Close()

-	off, err := f.Seek(0, os.SEEK_END)
+	off, err := f.Seek(0, io.SeekEnd)
 	testutil.Ok(t, err)

 	testutil.Ok(t, f.Truncate(off-1))
@@ -318,7 +319,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 	testutil.Ok(t, err)
 	defer f.Close()

-	off, err := f.Seek(0, os.SEEK_END)
+	off, err := f.Seek(0, io.SeekEnd)
 	testutil.Ok(t, err)

 	testutil.Ok(t, f.Truncate(off-8))
@@ -331,7 +332,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 	testutil.Ok(t, err)
 	defer f.Close()

-	off, err := f.Seek(0, os.SEEK_END)
+	off, err := f.Seek(0, io.SeekEnd)
 	testutil.Ok(t, err)

 	// Write junk before checksum starts.
@@ -346,7 +347,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 	testutil.Ok(t, err)
 	defer f.Close()

-	off, err := f.Seek(0, os.SEEK_END)
+	off, err := f.Seek(0, io.SeekEnd)
 	testutil.Ok(t, err)

 	// Write junk into checksum
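The repeated test pattern, seeking to the end and then truncating or overwriting a few trailing bytes, simulates a torn write at the tail of a WAL segment. A self-contained sketch of the same idea (the path and byte count are illustrative, and plain error handling stands in for the tsdb-internal testutil helpers):

package main

import (
	"io"
	"log"
	"os"
)

// corruptTail chops n bytes off the end of the file at path, mimicking a
// partially written final WAL entry.
func corruptTail(path string, n int64) error {
	f, err := os.OpenFile(path, os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	defer f.Close()

	// Seeking zero bytes from io.SeekEnd returns the file size.
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	return f.Truncate(size - n)
}

func main() {
	if err := corruptTail("segment.wal", 1); err != nil {
		log.Fatal(err)
	}
}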
