all: rename ReadAt* to MustReadAt* in order to avoid clashing with io.ReaderAt

This commit is contained in:
Aliaksandr Valialkin 2020-01-30 13:22:15 +02:00
parent d68546aa4a
commit ad8af629bb
7 changed files with 34 additions and 34 deletions

View File

@ -11,9 +11,9 @@ import (
var (
// Verify ByteBuffer implements the given interfaces.
_ io.Writer = &ByteBuffer{}
_ fs.ReadAtCloser = &ByteBuffer{}
_ io.ReaderFrom = &ByteBuffer{}
_ io.Writer = &ByteBuffer{}
_ fs.MustReadAtCloser = &ByteBuffer{}
_ io.ReaderFrom = &ByteBuffer{}
// Verify reader implement filestream.ReadCloser interface.
_ filestream.ReadCloser = &reader{}
@ -36,8 +36,8 @@ func (bb *ByteBuffer) Write(p []byte) (int, error) {
return len(p), nil
}
// ReadAt reads len(p) bytes starting from the given offset.
func (bb *ByteBuffer) ReadAt(p []byte, offset int64) {
// MustReadAt reads len(p) bytes starting from the given offset.
func (bb *ByteBuffer) MustReadAt(p []byte, offset int64) {
if offset < 0 {
logger.Panicf("BUG: cannot read at negative offset=%d", offset)
}

View File

@ -218,7 +218,7 @@ func TestByteBufferRead(t *testing.T) {
}
}
func TestByteBufferReadAt(t *testing.T) {
func TestByteBufferMustReadAt(t *testing.T) {
testStr := "foobar baz"
var bb ByteBuffer
@ -232,7 +232,7 @@ func TestByteBufferReadAt(t *testing.T) {
t.Fatalf("expecting non-nil error when reading at negative offset")
}
}()
bb.ReadAt(p, -1)
bb.MustReadAt(p, -1)
}()
// Try reading past the end of buffer
@ -242,18 +242,18 @@ func TestByteBufferReadAt(t *testing.T) {
t.Fatalf("expecting non-nil error when reading past the end of buffer")
}
}()
bb.ReadAt(p, int64(len(testStr))+1)
bb.MustReadAt(p, int64(len(testStr))+1)
}()
// Try reading the first byte
n := len(p)
bb.ReadAt(p, 0)
bb.MustReadAt(p, 0)
if string(p) != testStr[:n] {
t.Fatalf("unexpected value read: %q; want %q", p, testStr[:n])
}
// Try reading the last byte
bb.ReadAt(p, int64(len(testStr))-1)
bb.MustReadAt(p, int64(len(testStr))-1)
if string(p) != testStr[len(testStr)-1:] {
t.Fatalf("unexpected value read: %q; want %q", p, testStr[len(testStr)-1:])
}
@ -266,18 +266,18 @@ func TestByteBufferReadAt(t *testing.T) {
}
}()
p := make([]byte, 10)
bb.ReadAt(p, int64(len(testStr))-3)
bb.MustReadAt(p, int64(len(testStr))-3)
}()
// Try reading multiple bytes from the middle
p = make([]byte, 3)
bb.ReadAt(p, 2)
bb.MustReadAt(p, 2)
if string(p) != testStr[2:2+len(p)] {
t.Fatalf("unexpected value read: %q; want %q", p, testStr[2:2+len(p)])
}
}
func TestByteBufferReadAtParallel(t *testing.T) {
func TestByteBufferMustReadAtParallel(t *testing.T) {
ch := make(chan error, 10)
var bb ByteBuffer
bb.B = []byte("foo bar baz adsf adsf dsakjlkjlkj2l34324")
@ -285,7 +285,7 @@ func TestByteBufferReadAtParallel(t *testing.T) {
go func() {
p := make([]byte, 3)
for i := 0; i < len(bb.B)-len(p); i++ {
bb.ReadAt(p, int64(i))
bb.MustReadAt(p, int64(i))
}
ch <- nil
}()

View File

@ -14,10 +14,10 @@ import (
"golang.org/x/sys/unix"
)
// ReadAtCloser is rand-access read interface.
type ReadAtCloser interface {
// ReadAt must read len(p) bytes from offset off to p.
ReadAt(p []byte, off int64)
// MustReadAtCloser is a random-access read interface.
type MustReadAtCloser interface {
// MustReadAt must read len(p) bytes from offset off to p.
MustReadAt(p []byte, off int64)
// MustClose must close the reader.
MustClose()
@ -28,8 +28,8 @@ type ReaderAt struct {
f *os.File
}
// ReadAt reads len(p) bytes from off to p.
func (ra *ReaderAt) ReadAt(p []byte, off int64) {
// MustReadAt reads len(p) bytes from off to p.
func (ra *ReaderAt) MustReadAt(p []byte, off int64) {
if len(p) == 0 {
return
}

View File

@ -53,9 +53,9 @@ type part struct {
mrs []metaindexRow
indexFile fs.ReadAtCloser
itemsFile fs.ReadAtCloser
lensFile fs.ReadAtCloser
indexFile fs.MustReadAtCloser
itemsFile fs.MustReadAtCloser
lensFile fs.MustReadAtCloser
idxbCache *indexBlockCache
ibCache *inmemoryBlockCache
@ -107,7 +107,7 @@ func openFilePart(path string) (*part, error) {
return newPart(&ph, path, size, metaindexFile, indexFile, itemsFile, lensFile)
}
func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, indexFile, itemsFile, lensFile fs.ReadAtCloser) (*part, error) {
func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, indexFile, itemsFile, lensFile fs.MustReadAtCloser) (*part, error) {
var errors []error
mrs, err := unmarshalMetaindexRows(nil, metaindexReader)
if err != nil {

View File

@ -311,7 +311,7 @@ func (ps *partSearch) getIndexBlock(mr *metaindexRow) (*indexBlock, bool, error)
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.Resize(ps.compressedIndexBuf, int(mr.indexBlockSize))
ps.p.indexFile.ReadAt(ps.compressedIndexBuf, int64(mr.indexBlockOffset))
ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.indexBlockOffset))
var err error
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
@ -355,10 +355,10 @@ func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error)
ps.sb.Reset()
ps.sb.itemsData = bytesutil.Resize(ps.sb.itemsData, int(bh.itemsBlockSize))
ps.p.itemsFile.ReadAt(ps.sb.itemsData, int64(bh.itemsBlockOffset))
ps.p.itemsFile.MustReadAt(ps.sb.itemsData, int64(bh.itemsBlockOffset))
ps.sb.lensData = bytesutil.Resize(ps.sb.lensData, int(bh.lensBlockSize))
ps.p.lensFile.ReadAt(ps.sb.lensData, int64(bh.lensBlockOffset))
ps.p.lensFile.MustReadAt(ps.sb.lensData, int64(bh.lensBlockOffset))
ib := getInmemoryBlock()
if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil {

View File

@ -40,9 +40,9 @@ type part struct {
// Total size in bytes of part data.
size uint64
timestampsFile fs.ReadAtCloser
valuesFile fs.ReadAtCloser
indexFile fs.ReadAtCloser
timestampsFile fs.MustReadAtCloser
valuesFile fs.MustReadAtCloser
indexFile fs.MustReadAtCloser
metaindex []metaindexRow
@ -100,7 +100,7 @@ func openFilePart(path string) (*part, error) {
//
// The returned part calls MustClose on all the files passed to newPart
// when calling part.MustClose.
func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, timestampsFile, valuesFile, indexFile fs.ReadAtCloser) (*part, error) {
func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, timestampsFile, valuesFile, indexFile fs.MustReadAtCloser) (*part, error) {
var errors []error
metaindex, err := unmarshalMetaindexRows(nil, metaindexReader)
if err != nil {

View File

@ -229,7 +229,7 @@ func skipSmallMetaindexRows(metaindex []metaindexRow, tsid *TSID) []metaindexRow
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.Resize(ps.compressedIndexBuf[:0], int(mr.IndexBlockSize))
ps.p.indexFile.ReadAt(ps.compressedIndexBuf, int64(mr.IndexBlockOffset))
ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.IndexBlockOffset))
var err error
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
@ -302,8 +302,8 @@ func (ps *partSearch) readBlock(bh *blockHeader) {
}
ps.Block.timestampsData = bytesutil.Resize(ps.Block.timestampsData[:0], int(bh.TimestampsBlockSize))
ps.p.timestampsFile.ReadAt(ps.Block.timestampsData, int64(bh.TimestampsBlockOffset))
ps.p.timestampsFile.MustReadAt(ps.Block.timestampsData, int64(bh.TimestampsBlockOffset))
ps.Block.valuesData = bytesutil.Resize(ps.Block.valuesData[:0], int(bh.ValuesBlockSize))
ps.p.valuesFile.ReadAt(ps.Block.valuesData, int64(bh.ValuesBlockOffset))
ps.p.valuesFile.MustReadAt(ps.Block.valuesData, int64(bh.ValuesBlockOffset))
}