package storage

import (
    "encoding/json"
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

// partHeader represents part header.
// It is persisted in JSON format in the part's metadata file
// (see MustReadMetadata and MustWriteMetadata).
type partHeader struct {
    // RowsCount is the total number of rows in the part.
    RowsCount uint64

    // BlocksCount is the total number of blocks in the part.
    BlocksCount uint64

    // MinTimestamp is the minimum timestamp in the part.
    MinTimestamp int64

    // MaxTimestamp is the maximum timestamp in the part.
    MaxTimestamp int64

    // MinDedupInterval is the minimum dedup interval in milliseconds across all the blocks in the part.
    MinDedupInterval int64
}

// String returns string representation of ph.
func (ph *partHeader) String() string {
    return fmt.Sprintf("partHeader{rowsCount=%d,blocksCount=%d,minTimestamp=%d,maxTimestamp=%d}", ph.RowsCount, ph.BlocksCount, ph.MinTimestamp, ph.MaxTimestamp)
}

// Reset resets the ph.
func (ph *partHeader) Reset() {
    ph.RowsCount = 0
    ph.BlocksCount = 0
    ph.MinTimestamp = (1 << 63) - 1 // math.MaxInt64, so any real timestamp lowers it
    ph.MaxTimestamp = -1 << 63      // math.MinInt64, so any real timestamp raises it
    ph.MinDedupInterval = 0
}
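
// readMinDedupInterval reads the minimum dedup interval from the optional
// min_dedup_interval file in partPath and stores it in ph.MinDedupInterval
// in milliseconds. The file may be missing for parts created by old versions;
// in that case the interval is treated as zero.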
func (ph *partHeader) readMinDedupInterval(partPath string) error {
    filePath := filepath.Join(partPath, "min_dedup_interval")
    data, err := os.ReadFile(filePath)
    if err != nil {
        if errors.Is(err, os.ErrNotExist) {
            // The minimum dedup interval may not exist for old parts.
            ph.MinDedupInterval = 0
            return nil
        }
        return fmt.Errorf("cannot read %q: %w", filePath, err)
    }
    dedupInterval, err := promutils.ParseDuration(string(data))
    if err != nil {
        return fmt.Errorf("cannot parse minimum dedup interval %q at %q: %w", data, filePath, err)
    }
    ph.MinDedupInterval = dedupInterval.Milliseconds()
    return nil
}
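
// fromUserReadableTimestamp parses s formatted with userReadableTimeFormat
// and converts it to the internal timestamp representation (milliseconds)
// via timestampFromTime, which is defined elsewhere in this package.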
func fromUserReadableTimestamp(s string) (int64, error) {
    t, err := time.Parse(userReadableTimeFormat, s)
    if err != nil {
        return 0, err
    }
    return timestampFromTime(t), nil
}
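
// userReadableTimeFormat is a Go reference-time layout rendering timestamps
// as YYYYMMDDHHMMSS.sss; e.g. 2019-05-22T21:16:55.000 UTC
// becomes "20190522211655.000".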
const userReadableTimeFormat = "20060102150405.000"
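
// A part name encodes the part header in user-readable form. For instance,
// the hypothetical name "1000_10_20190522211655.000_20190522221655.000_4FE21A6F"
// would describe a part with 1000 rows in 10 blocks covering
// 2019-05-22T21:16:55 .. 2019-05-22T22:16:55 UTC; the trailing component is
// opaque and is not inspected by ParseFromPath.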

// ParseFromPath extracts ph info from the given path.
func (ph *partHeader) ParseFromPath(path string) error {
    ph.Reset()

    path = filepath.Clean(path)

    // Extract the encoded part name.
    partName := filepath.Base(path)

    // The part name must have the following form:
    // RowsCount_BlocksCount_MinTimestamp_MaxTimestamp_Garbage
    a := strings.Split(partName, "_")
    if len(a) != 5 {
        return fmt.Errorf("unexpected number of substrings in the part name %q: got %d; want %d", partName, len(a), 5)
    }

    var err error

    ph.RowsCount, err = strconv.ParseUint(a[0], 10, 64)
    if err != nil {
        return fmt.Errorf("cannot parse rowsCount from partName %q: %w", partName, err)
    }
    ph.BlocksCount, err = strconv.ParseUint(a[1], 10, 64)
    if err != nil {
        return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err)
    }
    ph.MinTimestamp, err = fromUserReadableTimestamp(a[2])
    if err != nil {
        return fmt.Errorf("cannot parse minTimestamp from partName %q: %w", partName, err)
    }
    ph.MaxTimestamp, err = fromUserReadableTimestamp(a[3])
    if err != nil {
        return fmt.Errorf("cannot parse maxTimestamp from partName %q: %w", partName, err)
    }

    if ph.MinTimestamp > ph.MaxTimestamp {
        return fmt.Errorf("minTimestamp cannot exceed maxTimestamp; got %d vs %d", ph.MinTimestamp, ph.MaxTimestamp)
    }
    if ph.RowsCount == 0 {
        return fmt.Errorf("rowsCount must be greater than 0; got %d", ph.RowsCount)
    }
    if ph.BlocksCount == 0 {
        return fmt.Errorf("blocksCount must be greater than 0; got %d", ph.BlocksCount)
    }
    if ph.BlocksCount > ph.RowsCount {
        return fmt.Errorf("blocksCount cannot be bigger than rowsCount; got blocksCount=%d, rowsCount=%d", ph.BlocksCount, ph.RowsCount)
    }

    if err := ph.readMinDedupInterval(path); err != nil {
        return fmt.Errorf("cannot read min dedup interval: %w", err)
    }

    return nil
}
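
// MustReadMetadata reads ph from the metadata file in partPath, falling back
// to parsing the part directory name for parts created before v1.90.0.
// It panics on unreadable or invalid metadata.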
func (ph *partHeader) MustReadMetadata(partPath string) {
    ph.Reset()

    metadataPath := filepath.Join(partPath, metadataFilename)
    if !fs.IsPathExist(metadataPath) {
        // This is a part created before v1.90.0.
        // Fall back to reading the metadata from the partPath itself.
        if err := ph.ParseFromPath(partPath); err != nil {
            logger.Panicf("FATAL: cannot parse metadata from %q: %s", partPath, err)
        }
    } else {
        metadata, err := os.ReadFile(metadataPath)
        if err != nil {
            logger.Panicf("FATAL: cannot read %q: %s", metadataPath, err)
        }
        if err := json.Unmarshal(metadata, ph); err != nil {
            logger.Panicf("FATAL: cannot parse %q: %s", metadataPath, err)
        }
    }

    // Perform various checks.
    if ph.MinTimestamp > ph.MaxTimestamp {
        logger.Panicf("FATAL: minTimestamp cannot exceed maxTimestamp at %q; got %d vs %d", metadataPath, ph.MinTimestamp, ph.MaxTimestamp)
    }
    if ph.RowsCount == 0 {
        logger.Panicf("FATAL: rowsCount must be greater than 0 at %q; got %d", metadataPath, ph.RowsCount)
    }
    if ph.BlocksCount == 0 {
        logger.Panicf("FATAL: blocksCount must be greater than 0 at %q; got %d", metadataPath, ph.BlocksCount)
    }
    if ph.BlocksCount > ph.RowsCount {
        logger.Panicf("FATAL: blocksCount cannot be bigger than rowsCount at %q; got blocksCount=%d, rowsCount=%d", metadataPath, ph.BlocksCount, ph.RowsCount)
    }
}
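
// MustWriteMetadata writes ph as JSON to the metadata file in partPath.
// It panics on marshaling or write errors.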
func (ph *partHeader) MustWriteMetadata(partPath string) {
    metadata, err := json.Marshal(ph)
    if err != nil {
        logger.Panicf("BUG: cannot marshal partHeader metadata: %s", err)
    }
    metadataPath := filepath.Join(partPath, metadataFilename)
    // There is no need to call fs.MustWriteAtomic() here,
    // since the file is created only once during part creation
    // and the part directory is synced afterwards.
    fs.MustWriteSync(metadataPath, metadata)
}