Merge tag 'v1.107.0' into pmm-6401-read-prometheus-data-files-cpc

f41gh7 2024-12-02 11:34:23 +01:00
commit d6cb7d09e5
1603 changed files with 437056 additions and 30801 deletions

View File

@@ -88,6 +88,35 @@ jobs:
run: make ${{ matrix.scenario}}
- name: Publish coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v5
with:
file: ./coverage.txt
integration-test:
name: integration-test
needs: [lint, test]
runs-on: ubuntu-latest
steps:
- name: Code checkout
uses: actions/checkout@v4
- name: Setup Go
id: go
uses: actions/setup-go@v5
with:
cache: false
go-version: stable
- name: Cache Go artifacts
uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/bin
~/go/pkg/mod
key: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', 'Makefile', 'app/**/Makefile') }}
restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-
- name: Run integration tests
run: make integration-test

View File

@@ -10,7 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
@@ -48,7 +48,7 @@ func selfScraper(scrapeInterval time.Duration) {
var bb bytesutil.ByteBuffer
var rows prometheus.Rows
var mrs []storage.MetricRow
var labels []prompb.Label
var labels []prompbmarshal.Label
t := time.NewTicker(scrapeInterval)
f := func(currentTime time.Time, sendStaleMarkers bool) {
currentTimestamp := currentTime.UnixNano() / 1e6
@@ -99,11 +99,11 @@ func selfScraper(scrapeInterval time.Duration) {
}
}
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {
func addLabel(dst []prompbmarshal.Label, key, value string) []prompbmarshal.Label {
if len(dst) < cap(dst) {
dst = dst[:len(dst)+1]
} else {
dst = append(dst, prompb.Label{})
dst = append(dst, prompbmarshal.Label{})
}
lb := &dst[len(dst)-1]
lb.Name = key
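
The commit swaps lib/prompb label types for lib/prompbmarshal throughout the self-scraper. As a hedged illustration of the addLabel helper above (the label names and values here are hypothetical), it reuses the slice's backing array across scrapes instead of reallocating:

	var labels []prompbmarshal.Label
	labels = addLabel(labels, "__name__", "vm_app_version")
	labels = addLabel(labels, "instance", "localhost:8428")
	// When cap(labels) is large enough, dst = dst[:len(dst)+1] extends the
	// slice in place; only when capacity is exhausted does append allocate.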

View File

@@ -498,7 +498,7 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
httpserver.Errorf(w, r, `unsupported multitenant prefix: %q; expected "insert"`, p.Prefix)
return true
}
at, err := auth.NewToken(p.AuthToken)
at, err := auth.NewTokenPossibleMultitenant(p.AuthToken)
if err != nil {
httpserver.Errorf(w, r, "cannot obtain auth token: %s", err)
return true
@@ -510,7 +510,13 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
httpserver.Errorf(w, r, "%s", err)
return true
}
w.WriteHeader(http.StatusNoContent)
statusCode := http.StatusNoContent
if strings.HasPrefix(p.Suffix, "prometheus/api/v1/import/prometheus/metrics/job/") {
// Return 200 status code for pushgateway requests.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3636
statusCode = http.StatusOK
}
w.WriteHeader(statusCode)
return true
}
if strings.HasPrefix(p.Suffix, "datadog/") {
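
The hunk above changes the response code for pushgateway-compatible import paths from 204 to 200, since pushgateway clients expect 200 (see the referenced issue). A minimal test sketch of the expected contract, with the suffix check inlined into a hypothetical handler (an illustration, not the actual vmagent wiring; it assumes the net/http, net/http/httptest, strings and testing imports):

	func TestPushgatewayImportStatusCode(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			statusCode := http.StatusNoContent
			// Mirror the suffix check from processMultitenantRequest above.
			suffix := strings.TrimPrefix(r.URL.Path, "/insert/0/")
			if strings.HasPrefix(suffix, "prometheus/api/v1/import/prometheus/metrics/job/") {
				statusCode = http.StatusOK
			}
			w.WriteHeader(statusCode)
		}))
		defer srv.Close()
		resp, err := http.Post(srv.URL+"/insert/0/prometheus/api/v1/import/prometheus/metrics/job/test", "text/plain", strings.NewReader("foo 1\n"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		_ = resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			t.Fatalf("got status %d, want %d", resp.StatusCode, http.StatusOK)
		}
	}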

View File

@@ -99,9 +99,6 @@ var (
// rwctxsGlobal contains statically populated entries when -remoteWrite.url is specified.
rwctxsGlobal []*remoteWriteCtx
// Data without tenant id is written to defaultAuthToken if -enableMultitenantHandlers is specified.
defaultAuthToken = &auth.Token{}
// ErrQueueFullHTTPRetry must be returned when TryPush() returns false.
ErrQueueFullHTTPRetry = &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("remote storage systems cannot keep up with the data ingestion rate; retry the request later " +
@@ -209,7 +206,7 @@ func Init() {
initStreamAggrConfigGlobal()
rwctxsGlobal = newRemoteWriteCtxs(nil, *remoteWriteURLs)
rwctxsGlobal = newRemoteWriteCtxs(*remoteWriteURLs)
disableOnDiskQueues := []bool(*disableOnDiskQueue)
disableOnDiskQueueAny = slices.Contains(disableOnDiskQueues, true)
@@ -294,7 +291,7 @@ var (
relabelConfigTimestamp = metrics.NewCounter(`vmagent_relabel_config_last_reload_success_timestamp_seconds`)
)
func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
func newRemoteWriteCtxs(urls []string) []*remoteWriteCtx {
if len(urls) == 0 {
logger.Panicf("BUG: urls must be non-empty")
}
@@ -316,11 +313,6 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
logger.Fatalf("invalid -remoteWrite.url=%q: %s", remoteWriteURL, err)
}
sanitizedURL := fmt.Sprintf("%d:secret-url", i+1)
if at != nil {
// Construct full remote_write url for the given tenant according to https://docs.victoriametrics.com/cluster-victoriametrics/#url-format
remoteWriteURL.Path = fmt.Sprintf("%s/insert/%d:%d/prometheus/api/v1/write", remoteWriteURL.Path, at.AccountID, at.ProjectID)
sanitizedURL = fmt.Sprintf("%s:%d:%d", sanitizedURL, at.AccountID, at.ProjectID)
}
if *showRemoteWriteURL {
sanitizedURL = fmt.Sprintf("%d:%s", i+1, remoteWriteURL)
}
@@ -411,11 +403,6 @@ func TryPush(at *auth.Token, wr *prompbmarshal.WriteRequest) bool {
func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnFailure bool) bool {
tss := wr.Timeseries
if at == nil && MultitenancyEnabled() {
// Write data to default tenant if at isn't set when multitenancy is enabled.
at = defaultAuthToken
}
var tenantRctx *relabelCtx
if at != nil {
// Convert at to (vm_account_id, vm_project_id) labels.

View File

@@ -0,0 +1,4 @@
rule_files:
- non-existing-file.yaml
tests: []

View File

@@ -74,8 +74,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
logger.Fatalf("failed to load test files %q: %v", files, err)
}
if len(testfiles) == 0 {
fmt.Println("no test file found")
return false
logger.Fatalf("no test file found")
}
labels := make(map[string]string)
@@ -97,8 +96,8 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
var failed bool
for fileName, file := range testfiles {
if err := ruleUnitTest(fileName, file, labels); err != nil {
fmt.Println(" FAILED")
fmt.Printf("\nfailed to run unit test for file %q: \n%v", fileName, err)
fmt.Println("FAILED")
fmt.Printf("failed to run unit test for file %q: \n%v", fileName, err)
failed = true
} else {
fmt.Println(" SUCCESS")
@@ -109,7 +108,7 @@ func UnitTest(files []string, disableGroupLabel bool, externalLabels []string, e
}
func ruleUnitTest(filename string, content []byte, externalLabels map[string]string) []error {
fmt.Println("\nUnit Testing: ", filename)
fmt.Println("\n\nUnit Testing: ", filename)
var unitTestInp unitTestFile
if err := yaml.UnmarshalStrict(content, &unitTestInp); err != nil {
return []error{fmt.Errorf("failed to unmarshal file: %w", err)}
@@ -139,6 +138,9 @@ func ruleUnitTest(filename string, content []byte, externalLabels map[string]str
if err != nil {
return []error{fmt.Errorf("failed to parse `rule_files`: %w", err)}
}
if len(testGroups) == 0 {
return []error{fmt.Errorf("found no rule group in %v", unitTestInp.RuleFiles)}
}
var errs []error
for _, t := range unitTestInp.Tests {

View File

@@ -24,7 +24,8 @@ func TestUnitTest_Failure(t *testing.T) {
}
}
// failing test
f([]string{"./testdata/failed-test-with-missing-rulefile.yaml"})
f([]string{"./testdata/failed-test.yaml"})
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)
@@ -48,16 +47,9 @@ var (
oauth2TokenURL = flag.String("datasource.oauth2.tokenUrl", "", "Optional OAuth2 tokenURL to use for -datasource.url")
oauth2Scopes = flag.String("datasource.oauth2.scopes", "", "Optional OAuth2 scopes to use for -datasource.url. Scopes must be delimited by ';'")
lookBack = flag.Duration("datasource.lookback", 0, `Deprecated: please adjust "-search.latencyOffset" at datasource side `+
`or specify "latency_offset" in rule group's params. Lookback defines how far into the past to look when evaluating queries. `+
`For example, if the datasource.lookback=5m then param "time" with value now()-5m will be added to every query.`)
queryStep = flag.Duration("datasource.queryStep", 5*time.Minute, "How far a value can fallback to when evaluating queries to the configured -datasource.url and -remoteRead.url. Only valid for prometheus datasource. "+
"For example, if -datasource.queryStep=15s then param \"step\" with value \"15s\" will be added to every query. "+
"If set to 0, rule's evaluation interval will be used instead.")
queryTimeAlignment = flag.Bool("datasource.queryTimeAlignment", true, `Deprecated: please use "eval_alignment" in rule group instead. `+
`Whether to align "time" parameter with evaluation interval. `+
"Alignment supposed to produce deterministic results despite number of vmalert replicas or time they were started. "+
"See more details at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1257")
maxIdleConnections = flag.Int("datasource.maxIdleConnections", 100, `Defines the number of idle (keep-alive connections) to each configured datasource. Consider setting this value equal to the value: groups_total * group.concurrency. Too low a value may result in a high number of sockets in TIME_WAIT state.`)
idleConnectionTimeout = flag.Duration("datasource.idleConnTimeout", 50*time.Second, `Defines a duration for idle (keep-alive connections) to exist. Consider setting this value less than "-http.idleConnTimeout". It must prevent possible "write: broken pipe" and "read: connection reset by peer" errors.`)
disableKeepAlive = flag.Bool("datasource.disableKeepAlive", false, `Whether to disable long-lived connections to the datasource. `+
@@ -90,12 +82,6 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
if *addr == "" {
return nil, fmt.Errorf("datasource.url is empty")
}
if !*queryTimeAlignment {
logger.Warnf("flag `-datasource.queryTimeAlignment` is deprecated and will be removed in next releases. Please use `eval_alignment` in rule group instead.")
}
if *lookBack != 0 {
logger.Warnf("flag `-datasource.lookback` is deprecated and will be removed in next releases. Please adjust `-search.latencyOffset` at datasource side or specify `latency_offset` in rule group's params. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
}
tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {

View File

@@ -78,8 +78,6 @@ absolute path to all .tpl files in root.
externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
"In case of conflicts, original labels are kept with prefix `exported_`.")
remoteReadIgnoreRestoreErrors = flag.Bool("remoteRead.ignoreRestoreErrors", true, "Whether to ignore errors from remote storage when restoring alerts state on startup. DEPRECATED - this flag has no effect and will be removed in the next releases.")
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules file are validated. The -rule flag must be specified.")
)
@@ -97,10 +95,6 @@ func main() {
buildinfo.Init()
logger.Init()
if !*remoteReadIgnoreRestoreErrors {
logger.Warnf("flag `remoteRead.ignoreRestoreErrors` is deprecated and will be removed in next releases.")
}
err := templates.Load(*ruleTemplatesPath, true)
if err != nil {
logger.Fatalf("failed to parse %q: %s", *ruleTemplatesPath, err)

View File

@@ -14,7 +14,7 @@ import (
)
var (
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with Prometheus HTTP API. It can be single node VictoriaMetrics or vmselect."+
addr = flag.String("remoteRead.url", "", "Optional URL to datasource compatible with MetricsQL. It can be single node VictoriaMetrics or vmselect."+
"Remote read is used to restore alerts state."+
"This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
"Supports address in the form of IP address with a port (e.g., http://127.0.0.1:8428) or DNS SRV record. "+

View File

@@ -27,7 +27,7 @@ var defaultConcurrency = cgroup.AvailableCPUs() * 2
const (
defaultMaxBatchSize = 1e4
defaultMaxQueueSize = 1e6
defaultMaxQueueSize = 1e5
defaultFlushInterval = 2 * time.Second
defaultWriteTimeout = 30 * time.Second
)

View File

@@ -711,7 +711,8 @@ func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts ti
for k, v := range ar.Labels {
labelsFilter += fmt.Sprintf(",%s=%q", k, v)
}
expr := fmt.Sprintf("last_over_time(%s{%s%s}[%ds])",
// use `default_rollup()` instead of `last_over_time()` here to account for possible staleness markers
expr := fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
alertForStateMetricName, nameStr, labelsFilter, int(lookback.Seconds()))
res, _, err := q.Query(ctx, expr, ts)
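
For illustration, with label values borrowed from the tests below, the restore expression built above expands like this (a sketch; nameStr and labelsFilter are assembled elsewhere in restore):

	expr := fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
		"ALERTS_FOR_STATE", `alertgroup="TestRestore",alertname="foo"`, "", 3600)
	// expr == `default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`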

View File

@@ -791,7 +791,7 @@ func TestGroup_Restore(t *testing.T) {
// one active alert with state restore
ts := time.Now().Truncate(time.Hour)
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
stateMetric("foo", ts))
fn(
[]config.Rule{{Alert: "foo", Expr: "foo", For: promutils.NewDuration(time.Second)}},
@@ -804,7 +804,7 @@ func TestGroup_Restore(t *testing.T) {
// two rules, two active alerts, one with state restored
ts = time.Now().Truncate(time.Hour)
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
stateMetric("bar", ts))
fn(
[]config.Rule{
@@ -824,9 +824,9 @@ func TestGroup_Restore(t *testing.T) {
// two rules, two active alerts, two with state restored
ts = time.Now().Truncate(time.Hour)
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo"}[3600s])`,
stateMetric("foo", ts))
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="bar"}[3600s])`,
stateMetric("bar", ts))
fn(
[]config.Rule{
@@ -846,7 +846,7 @@ func TestGroup_Restore(t *testing.T) {
// one active alert but wrong state restore
ts = time.Now().Truncate(time.Hour)
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertname="bar",alertgroup="TestRestore"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertname="bar",alertgroup="TestRestore"}[3600s])`,
stateMetric("wrong alert", ts))
fn(
[]config.Rule{{Alert: "foo", Expr: "foo", For: promutils.NewDuration(time.Second)}},
@@ -859,7 +859,7 @@ func TestGroup_Restore(t *testing.T) {
// one active alert with labels
ts = time.Now().Truncate(time.Hour)
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
stateMetric("foo", ts, "env", "dev"))
fn(
[]config.Rule{{Alert: "foo", Expr: "foo", Labels: map[string]string{"env": "dev"}, For: promutils.NewDuration(time.Second)}},
@@ -872,7 +872,7 @@ func TestGroup_Restore(t *testing.T) {
// one active alert with restore labels mismatch
ts = time.Now().Truncate(time.Hour)
fqr.Set(`last_over_time(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
fqr.Set(`default_rollup(ALERTS_FOR_STATE{alertgroup="TestRestore",alertname="foo",env="dev"}[3600s])`,
stateMetric("foo", ts, "env", "dev", "team", "foo"))
fn(
[]config.Rule{{Alert: "foo", Expr: "foo", Labels: map[string]string{"env": "dev"}, For: promutils.NewDuration(time.Second)}},

View File

@@ -67,6 +67,7 @@ type UserInfo struct {
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
DiscoverBackendIPs *bool `yaml:"discover_backend_ips,omitempty"`
URLMaps []URLMap `yaml:"url_map,omitempty"`
DumpRequestOnErrors bool `yaml:"dump_request_on_errors,omitempty"`
HeadersConf HeadersConf `yaml:",inline"`
MaxConcurrentRequests int `yaml:"max_concurrent_requests,omitempty"`
DefaultURL *URLPrefix `yaml:"default_url,omitempty"`
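
A sketch of how the new dump_request_on_errors option surfaces once a config is parsed; the yaml package (gopkg.in/yaml.v2) and the minimal config literal are assumptions for illustration:

	var ui UserInfo
	cfg := []byte("username: foobar\ndump_request_on_errors: true\n")
	if err := yaml.Unmarshal(cfg, &ui); err != nil {
		log.Fatalf("cannot parse config: %s", err)
	}
	// ui.DumpRequestOnErrors is now true, so "missing route" errors include the
	// debugInfo() dump of host, path, query args and request headers (see below).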

View File

@@ -22,26 +22,26 @@ users:
# - or http://default2:8888/unsupported_url_handler?request_path=/non/existing/path
#
# Regular expressions are allowed in `src_paths` entries.
- username: "foobar"
url_map:
- src_paths:
- "/api/v1/query"
- "/api/v1/query_range"
- "/api/v1/label/[^/]+/values"
url_prefix:
- "http://vmselect1:8481/select/42/prometheus"
- "http://vmselect2:8481/select/42/prometheus"
- src_paths: ["/api/v1/write"]
url_prefix: "http://vminsert:8480/insert/42/prometheus"
headers:
- "X-Scope-OrgID: abc"
- username: "foobar"
ip_filters:
deny_list: [127.0.0.1]
default_url:
- "http://default1:8888/unsupported_url_handler"
- "http://default2:8888/unsupported_url_handler"
url_map:
- src_paths:
- "/api/v1/query"
- "/api/v1/query_range"
- "/api/v1/label/[^/]+/values"
url_prefix:
- "http://vmselect1:8481/select/42/prometheus"
- "http://vmselect2:8481/select/42/prometheus"
- src_paths: ["/api/v1/write"]
url_prefix: "http://vminsert:8480/insert/42/prometheus"
headers:
- "X-Scope-OrgID: abc"
default_url:
- "http://default1:8888/unsupported_url_handler"
- "http://default2:8888/unsupported_url_handler"
ip_filters:
allow_list: ["1.2.3.0/24", "127.0.0.1"]
deny_list:
- 10.1.0.1
- 10.1.0.1

View File

@@ -61,6 +61,9 @@ var (
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
backendTLSServerName = flag.String("backend.TLSServerName", "", "Optional TLS ServerName, which must be sent to HTTPS backend. "+
"See https://docs.victoriametrics.com/vmauth/#backend-tls-setup")
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmauth. The auth configuration file is validated. The -auth.config flag must be specified.")
removeXFFHTTPHeaderValue = flag.Bool(`removeXFFHTTPHeaderValue`, false, "Whether to remove the X-Forwarded-For HTTP header value from client requests before forwarding them to the backend. "+
"Recommended when vmauth is exposed to the internet.")
)
func main() {
@@ -71,6 +74,16 @@ func main() {
buildinfo.Init()
logger.Init()
if *dryRun {
if len(*authConfigPath) == 0 {
logger.Fatalf("missing required `-auth.config` command-line flag")
}
if _, err := reloadAuthConfig(); err != nil {
logger.Fatalf("failed to parse %q: %s", *authConfigPath, err)
}
return
}
listenAddrs := *httpListenAddrs
if len(listenAddrs) == 0 {
listenAddrs = []string{":8427"}
@@ -198,7 +211,11 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
return
}
missingRouteRequests.Inc()
httpserver.Errorf(w, r, "missing route for %s", u.String())
var di string
if ui.DumpRequestOnErrors {
di = debugInfo(u, r.Header)
}
httpserver.Errorf(w, r, "missing route for %q%s", u.String(), di)
return
}
up, hc = ui.DefaultURL, ui.HeadersConf
@@ -377,7 +394,7 @@ func sanitizeRequestHeaders(r *http.Request) *http.Request {
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
prior := req.Header["X-Forwarded-For"]
if len(prior) > 0 {
if len(prior) > 0 && !*removeXFFHTTPHeaderValue {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
req.Header.Set("X-Forwarded-For", clientIP)
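
To make the new -removeXFFHTTPHeaderValue flag concrete, here is a self-contained sketch of the branch above with hypothetical values:

	prior := []string{"12.34.56.78"} // X-Forwarded-For values sent by the client
	clientIP := "42.2.3.84"          // IP of the direct peer
	removeXFF := true                // value of -removeXFFHTTPHeaderValue
	xff := clientIP
	if len(prior) > 0 && !removeXFF {
		xff = strings.Join(prior, ", ") + ", " + clientIP
	}
	fmt.Println(xff) // flag on: "42.2.3.84"; flag off: "12.34.56.78, 42.2.3.84"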
@@ -650,3 +667,14 @@ func (rtb *readTrackingBody) Close() error {
return nil
}
func debugInfo(u *url.URL, h http.Header) string {
s := &strings.Builder{}
fmt.Fprintf(s, " (host: %q; ", u.Host)
fmt.Fprintf(s, "path: %q; ", u.Path)
fmt.Fprintf(s, "args: %q; ", u.Query().Encode())
fmt.Fprint(s, "headers:")
_ = h.WriteSubset(s, nil)
fmt.Fprint(s, ")")
return s.String()
}
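
Given the implementation above, the dump looks roughly like this (values taken from the test expectations below; http.Header.WriteSubset sorts header keys):

	u, _ := url.Parse("http://some-host.com/abc?de=fg")
	h := http.Header{"X-Forwarded-For": {"12.34.56.78"}}
	fmt.Print(debugInfo(u, h))
	// Output:
	//  (host: "some-host.com"; path: "/abc"; args: "de=fg"; headers:X-Forwarded-For: 12.34.56.78
	// )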

View File

@@ -360,7 +360,27 @@ unauthorized_user:
}
responseExpected = `
statusCode=400
remoteAddr: "42.2.3.84:6789, X-Forwarded-For: 12.34.56.78"; requestURI: /abc?de=fg; missing route for http://some-host.com/abc?de=fg`
remoteAddr: "42.2.3.84:6789, X-Forwarded-For: 12.34.56.78"; requestURI: /abc?de=fg; missing route for "http://some-host.com/abc?de=fg"`
f(cfgStr, requestURL, backendHandler, responseExpected)
// missing default_url and default url_prefix for unauthorized user with dump_request_on_errors enabled
cfgStr = `
unauthorized_user:
dump_request_on_errors: true
url_map:
- src_paths: ["/foo/.+"]
url_prefix: {BACKEND}/x-foo/`
requestURL = "http://some-host.com/abc?de=fg"
backendHandler = func(_ http.ResponseWriter, _ *http.Request) {
panic(fmt.Errorf("backend handler shouldn't be called"))
}
responseExpected = `
statusCode=400
remoteAddr: "42.2.3.84:6789, X-Forwarded-For: 12.34.56.78"; requestURI: /abc?de=fg; missing route for "http://some-host.com/abc?de=fg" (host: "some-host.com"; path: "/abc"; args: "de=fg"; headers:Connection: Some-Header,Other-Header
Pass-Header: abc
Some-Header: foobar
X-Forwarded-For: 12.34.56.78
)`
f(cfgStr, requestURL, backendHandler, responseExpected)
// missing default_url and default url_prefix for unauthorized user when there are configs for authorized users

View File

@ -1,348 +1,348 @@
package main
import (
"context"
"net/http"
"testing"
"time"
"github.com/prometheus/prometheus/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)
func TestRemoteRead(t *testing.T) {
barpool.Disable(true)
defer func() {
barpool.Disable(false)
}()
defer func() { isSilent = false }()
var testCases = []struct {
name string
remoteReadConfig remoteread.Config
vmCfg vm.Config
start string
end string
numOfSamples int64
numOfSeries int64
rrp remoteReadProcessor
chunk string
remoteReadSeries func(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries
expectedSeries []vm.TimeSeries
}{
{
name: "step minute on minute time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
vmCfg: vm.Config{Addr: "", Concurrency: 1},
start: "2022-11-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMinute,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{0, 0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{100, 100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{200, 200},
},
},
},
{
name: "step month on month time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
vmCfg: vm.Config{Addr: "", Concurrency: 1,
Transport: http.DefaultTransport.(*http.Transport)},
start: "2022-09-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMonth,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1664184185000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1664184185000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1664184185000},
Values: []float64{200},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1666819415000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1666819415000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1666819415000},
Values: []float64{200}},
},
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
remoteReadServer := remote_read_integration.NewRemoteReadServer(t)
defer remoteReadServer.Close()
remoteWriteServer := remote_read_integration.NewRemoteWriteServer(t)
defer remoteWriteServer.Close()
tt.remoteReadConfig.Addr = remoteReadServer.URL()
rr, err := remoteread.NewClient(tt.remoteReadConfig)
if err != nil {
t.Fatalf("error create remote read client: %s", err)
}
start, err := time.Parse(time.RFC3339, tt.start)
if err != nil {
t.Fatalf("Error parse start time: %s", err)
}
end, err := time.Parse(time.RFC3339, tt.end)
if err != nil {
t.Fatalf("Error parse end time: %s", err)
}
rrs := tt.remoteReadSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)
remoteReadServer.SetRemoteReadSeries(rrs)
remoteWriteServer.ExpectedSeries(tt.expectedSeries)
tt.vmCfg.Addr = remoteWriteServer.URL()
b, err := backoff.New(10, 1.8, time.Second*2)
if err != nil {
t.Fatalf("failed to create backoff: %s", err)
}
tt.vmCfg.Backoff = b
importer, err := vm.NewImporter(ctx, tt.vmCfg)
if err != nil {
t.Fatalf("failed to create VM importer: %s", err)
}
defer importer.Close()
rmp := remoteReadProcessor{
src: rr,
dst: importer,
filter: remoteReadFilter{
timeStart: &start,
timeEnd: &end,
chunk: tt.chunk,
},
cc: 1,
isVerbose: false,
}
err = rmp.run(ctx)
if err != nil {
t.Fatalf("failed to run remote read processor: %s", err)
}
})
}
}
func TestSteamRemoteRead(t *testing.T) {
barpool.Disable(true)
defer func() {
barpool.Disable(false)
}()
defer func() { isSilent = false }()
var testCases = []struct {
name string
remoteReadConfig remoteread.Config
vmCfg vm.Config
start string
end string
numOfSamples int64
numOfSeries int64
rrp remoteReadProcessor
chunk string
remoteReadSeries func(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries
expectedSeries []vm.TimeSeries
}{
{
name: "step minute on minute time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*", UseStream: true},
vmCfg: vm.Config{Addr: "", Concurrency: 1},
start: "2022-11-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMinute,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{0, 0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{100, 100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1669454585000, 1669454615000},
Values: []float64{200, 200},
},
},
},
{
name: "step month on month time range",
remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*", UseStream: true},
vmCfg: vm.Config{Addr: "", Concurrency: 1},
start: "2022-09-26T11:23:05+02:00",
end: "2022-11-26T11:24:05+02:00",
numOfSamples: 2,
numOfSeries: 3,
chunk: stepper.StepMonth,
remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
expectedSeries: []vm.TimeSeries{
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1664184185000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1664184185000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1664184185000},
Values: []float64{200},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
Timestamps: []int64{1666819415000},
Values: []float64{0},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
Timestamps: []int64{1666819415000},
Values: []float64{100},
},
{
Name: "vm_metric_1",
LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
Timestamps: []int64{1666819415000},
Values: []float64{200}},
},
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
remoteReadServer := remote_read_integration.NewRemoteReadStreamServer(t)
defer remoteReadServer.Close()
remoteWriteServer := remote_read_integration.NewRemoteWriteServer(t)
defer remoteWriteServer.Close()
tt.remoteReadConfig.Addr = remoteReadServer.URL()
rr, err := remoteread.NewClient(tt.remoteReadConfig)
if err != nil {
t.Fatalf("error create remote read client: %s", err)
}
start, err := time.Parse(time.RFC3339, tt.start)
if err != nil {
t.Fatalf("Error parse start time: %s", err)
}
end, err := time.Parse(time.RFC3339, tt.end)
if err != nil {
t.Fatalf("Error parse end time: %s", err)
}
rrs := tt.remoteReadSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)
remoteReadServer.InitMockStorage(rrs)
remoteWriteServer.ExpectedSeries(tt.expectedSeries)
tt.vmCfg.Addr = remoteWriteServer.URL()
b, err := backoff.New(10, 1.8, time.Second*2)
if err != nil {
t.Fatalf("failed to create backoff: %s", err)
}
tt.vmCfg.Backoff = b
importer, err := vm.NewImporter(ctx, tt.vmCfg)
if err != nil {
t.Fatalf("failed to create VM importer: %s", err)
}
defer importer.Close()
rmp := remoteReadProcessor{
src: rr,
dst: importer,
filter: remoteReadFilter{
timeStart: &start,
timeEnd: &end,
chunk: tt.chunk,
},
cc: 1,
isVerbose: false,
}
err = rmp.run(ctx)
if err != nil {
t.Fatalf("failed to run remote read processor: %s", err)
}
})
}
}

View File

@@ -15,8 +15,10 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
@@ -238,7 +240,7 @@ func processStreamResponse(body io.ReadCloser, callback StreamCallback) error {
bb := bbPool.Get()
defer func() { bbPool.Put(bb) }()
stream := remote.NewChunkedReader(body, remote.DefaultChunkedReadLimit, bb.B)
stream := remote.NewChunkedReader(body, config.DefaultChunkedReadLimit, bb.B)
for {
res := &prompb.ChunkedReadResponse{}
err := stream.NextProto(res)
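
The only change here is that DefaultChunkedReadLimit now comes from the prometheus config package instead of storage/remote, matching the new import above. For context, a simplified sketch of the surrounding read loop (the callback and error handling are assumptions):

	stream := remote.NewChunkedReader(body, config.DefaultChunkedReadLimit, nil)
	for {
		res := &prompb.ChunkedReadResponse{}
		if err := stream.NextProto(res); err != nil {
			if errors.Is(err, io.EOF) {
				break // the server closed the stream: all responses were read
			}
			return err
		}
		for _, series := range res.ChunkedSeries {
			_ = series // decode series.Chunks here
		}
	}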

View File

@@ -1,368 +1,368 @@
package remote_read_integration
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb/chunks"
)
const (
maxBytesInFrame = 1024 * 1024
)
type RemoteReadServer struct {
server *httptest.Server
series []*prompb.TimeSeries
storage *MockStorage
}
// NewRemoteReadServer creates a remote read server. It exposes a single endpoint and responds with the
// passed series based on the request to the read endpoint. It returns a server which should be closed after
// being used.
func NewRemoteReadServer(t *testing.T) *RemoteReadServer {
rrs := &RemoteReadServer{
series: make([]*prompb.TimeSeries, 0),
}
rrs.server = httptest.NewServer(rrs.getReadHandler(t))
return rrs
}
// Close closes the server.
func (rrs *RemoteReadServer) Close() {
rrs.server.Close()
}
func (rrs *RemoteReadServer) URL() string {
return rrs.server.URL
}
func (rrs *RemoteReadServer) SetRemoteReadSeries(series []*prompb.TimeSeries) {
rrs.series = append(rrs.series, series...)
}
func (rrs *RemoteReadServer) getReadHandler(t *testing.T) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !validateReadHeaders(t, r) {
t.Fatalf("invalid read headers")
}
compressed, err := io.ReadAll(r.Body)
if err != nil {
t.Fatalf("error read body: %s", err)
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
t.Fatalf("error decode compressed data:%s", err)
}
var req prompb.ReadRequest
if err := proto.Unmarshal(reqBuf, &req); err != nil {
t.Fatalf("error unmarshal read request: %s", err)
}
resp := &prompb.ReadResponse{
Results: make([]*prompb.QueryResult, len(req.Queries)),
}
for i, r := range req.Queries {
startTs := r.StartTimestampMs
endTs := r.EndTimestampMs
ts := make([]*prompb.TimeSeries, len(rrs.series))
for i, s := range rrs.series {
var samples []prompb.Sample
for _, sample := range s.Samples {
if sample.Timestamp >= startTs && sample.Timestamp < endTs {
samples = append(samples, sample)
}
}
var series prompb.TimeSeries
if len(samples) > 0 {
series.Labels = s.Labels
series.Samples = samples
}
ts[i] = &series
}
resp.Results[i] = &prompb.QueryResult{Timeseries: ts}
data, err := proto.Marshal(resp)
if err != nil {
t.Fatalf("error marshal response: %s", err)
}
compressed = snappy.Encode(nil, data)
w.Header().Set("Content-Type", "application/x-protobuf")
w.Header().Set("Content-Encoding", "snappy")
w.WriteHeader(http.StatusOK)
if _, err := w.Write(compressed); err != nil {
t.Fatalf("snappy encode error: %s", err)
}
}
})
}
func NewRemoteReadStreamServer(t *testing.T) *RemoteReadServer {
rrs := &RemoteReadServer{
series: make([]*prompb.TimeSeries, 0),
}
rrs.server = httptest.NewServer(rrs.getStreamReadHandler(t))
return rrs
}
func (rrs *RemoteReadServer) InitMockStorage(series []*prompb.TimeSeries) {
rrs.storage = NewMockStorage(series)
}
func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !validateStreamReadHeaders(t, r) {
t.Fatalf("invalid read headers")
}
f, ok := w.(http.Flusher)
if !ok {
t.Fatalf("internal http.ResponseWriter does not implement http.Flusher interface")
}
stream := remote.NewChunkedWriter(w, f)
data, err := io.ReadAll(r.Body)
if err != nil {
t.Fatalf("error read body: %s", err)
}
decodedData, err := snappy.Decode(nil, data)
if err != nil {
t.Fatalf("error decode compressed data:%s", err)
}
var req prompb.ReadRequest
if err := proto.Unmarshal(decodedData, &req); err != nil {
t.Fatalf("error unmarshal read request: %s", err)
}
var chks []prompb.Chunk
ctx := context.Background()
for idx, r := range req.Queries {
startTs := r.StartTimestampMs
endTs := r.EndTimestampMs
var matchers []*labels.Matcher
cb := func() (int64, error) { return 0, nil }
c := remote.NewSampleAndChunkQueryableClient(rrs.storage, nil, matchers, true, cb)
q, err := c.ChunkQuerier(startTs, endTs)
if err != nil {
t.Fatalf("error init chunk querier: %s", err)
}
ss := q.Select(ctx, false, nil, matchers...)
var iter chunks.Iterator
for ss.Next() {
series := ss.At()
iter = series.Iterator(iter)
labels := remote.MergeLabels(labelsToLabelsProto(series.Labels()), nil)
frameBytesLeft := maxBytesInFrame
for _, lb := range labels {
frameBytesLeft -= lb.Size()
}
isNext := iter.Next()
for isNext {
chunk := iter.At()
if chunk.Chunk == nil {
t.Fatalf("error found not populated chunk returned by SeriesSet at ref: %v", chunk.Ref)
}
chks = append(chks, prompb.Chunk{
MinTimeMs: chunk.MinTime,
MaxTimeMs: chunk.MaxTime,
Type: prompb.Chunk_Encoding(chunk.Chunk.Encoding()),
Data: chunk.Chunk.Bytes(),
})
frameBytesLeft -= chks[len(chks)-1].Size()
// We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size.
isNext = iter.Next()
if frameBytesLeft > 0 && isNext {
continue
}
resp := &prompb.ChunkedReadResponse{
ChunkedSeries: []*prompb.ChunkedSeries{
{Labels: labels, Chunks: chks},
},
QueryIndex: int64(idx),
}
b, err := proto.Marshal(resp)
if err != nil {
t.Fatalf("error marshal response: %s", err)
}
if _, err := stream.Write(b); err != nil {
t.Fatalf("error write to stream: %s", err)
}
chks = chks[:0]
rrs.storage.Reset()
}
if err := iter.Err(); err != nil {
t.Fatalf("error iterate over chunk series: %s", err)
}
}
}
})
}
func validateReadHeaders(t *testing.T, r *http.Request) bool {
if r.Method != http.MethodPost {
t.Fatalf("got %q method, expected %q", r.Method, http.MethodPost)
}
if r.Header.Get("Content-Encoding") != "snappy" {
t.Fatalf("got %q content encoding header, expected %q", r.Header.Get("Content-Encoding"), "snappy")
}
if r.Header.Get("Content-Type") != "application/x-protobuf" {
t.Fatalf("got %q content type header, expected %q", r.Header.Get("Content-Type"), "application/x-protobuf")
}
remoteReadVersion := r.Header.Get("X-Prometheus-Remote-Read-Version")
if remoteReadVersion == "" {
t.Fatalf("got empty prometheus remote read header")
}
if !strings.HasPrefix(remoteReadVersion, "0.1.") {
t.Fatalf("wrong remote version defined")
}
return true
}
func validateStreamReadHeaders(t *testing.T, r *http.Request) bool {
if r.Method != http.MethodPost {
t.Fatalf("got %q method, expected %q", r.Method, http.MethodPost)
}
if r.Header.Get("Content-Encoding") != "snappy" {
t.Fatalf("got %q content encoding header, expected %q", r.Header.Get("Content-Encoding"), "snappy")
}
if r.Header.Get("Content-Type") != "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" {
t.Fatalf("got %q content type header, expected %q", r.Header.Get("Content-Type"), "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
}
remoteReadVersion := r.Header.Get("X-Prometheus-Remote-Read-Version")
if remoteReadVersion == "" {
t.Fatalf("got empty prometheus remote read header")
}
if !strings.HasPrefix(remoteReadVersion, "0.1.") {
t.Fatalf("wrong remote version defined")
}
return true
}
func GenerateRemoteReadSeries(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries {
var ts []*prompb.TimeSeries
j := 0
for i := 0; i < int(numOfSeries); i++ {
if i%3 == 0 {
j++
}
timeSeries := prompb.TimeSeries{
Labels: []prompb.Label{
{Name: labels.MetricName, Value: fmt.Sprintf("vm_metric_%d", j)},
{Name: "job", Value: strconv.Itoa(i)},
},
}
ts = append(ts, &timeSeries)
}
for i := range ts {
ts[i].Samples = generateRemoteReadSamples(i, start, end, numOfSamples)
}
return ts
}
func generateRemoteReadSamples(idx int, startTime, endTime, numOfSamples int64) []prompb.Sample {
samples := make([]prompb.Sample, 0)
delta := (endTime - startTime) / numOfSamples
t := startTime
for t != endTime {
v := 100 * int64(idx)
samples = append(samples, prompb.Sample{
Timestamp: t * 1000,
Value: float64(v),
})
t = t + delta
}
return samples
}
type MockStorage struct {
query *prompb.Query
store []*prompb.TimeSeries
}
func NewMockStorage(series []*prompb.TimeSeries) *MockStorage {
return &MockStorage{store: series}
}
func (ms *MockStorage) Read(_ context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
if ms.query != nil {
return nil, fmt.Errorf("expected only one call to remote client got: %v", query)
}
ms.query = query
q := &prompb.QueryResult{Timeseries: make([]*prompb.TimeSeries, 0, len(ms.store))}
for _, s := range ms.store {
var samples []prompb.Sample
for _, sample := range s.Samples {
if sample.Timestamp >= query.StartTimestampMs && sample.Timestamp < query.EndTimestampMs {
samples = append(samples, sample)
}
}
var series prompb.TimeSeries
if len(samples) > 0 {
series.Labels = s.Labels
series.Samples = samples
}
q.Timeseries = append(q.Timeseries, &series)
}
return q, nil
}
func (ms *MockStorage) Reset() {
ms.query = nil
}
func labelsToLabelsProto(labels labels.Labels) []prompb.Label {
result := make([]prompb.Label, 0, len(labels))
for _, l := range labels {
result = append(result, prompb.Label{
Name: l.Name,
Value: l.Value,
})
}
return result
}
// import (
// "context"
// "fmt"
// "io"
// "net/http"
// "net/http/httptest"
// "strconv"
// "strings"
// "testing"
//
// "github.com/gogo/protobuf/proto"
// "github.com/golang/snappy"
// "github.com/prometheus/prometheus/model/labels"
// "github.com/prometheus/prometheus/prompb"
// "github.com/prometheus/prometheus/storage/remote"
// "github.com/prometheus/prometheus/tsdb/chunks"
// )
//
// const (
// maxBytesInFrame = 1024 * 1024
// )
//
// type RemoteReadServer struct {
// server *httptest.Server
// series []*prompb.TimeSeries
// storage *MockStorage
// }
//
// // NewRemoteReadServer creates a remote read server. It exposes a single endpoint and responds with the
// // passed series based on the request to the read endpoint. It returns a server which should be closed after
// // being used.
// func NewRemoteReadServer(t *testing.T) *RemoteReadServer {
// rrs := &RemoteReadServer{
// series: make([]*prompb.TimeSeries, 0),
// }
// rrs.server = httptest.NewServer(rrs.getReadHandler(t))
// return rrs
// }
//
// // Close closes the server.
// func (rrs *RemoteReadServer) Close() {
// rrs.server.Close()
// }
//
// func (rrs *RemoteReadServer) URL() string {
// return rrs.server.URL
// }
//
// func (rrs *RemoteReadServer) SetRemoteReadSeries(series []*prompb.TimeSeries) {
// rrs.series = append(rrs.series, series...)
// }
//
// func (rrs *RemoteReadServer) getReadHandler(t *testing.T) http.Handler {
// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// if !validateReadHeaders(t, r) {
// t.Fatalf("invalid read headers")
// }
//
// compressed, err := io.ReadAll(r.Body)
// if err != nil {
// t.Fatalf("error read body: %s", err)
// }
//
// reqBuf, err := snappy.Decode(nil, compressed)
// if err != nil {
// t.Fatalf("error decode compressed data:%s", err)
// }
//
// var req prompb.ReadRequest
// if err := proto.Unmarshal(reqBuf, &req); err != nil {
// t.Fatalf("error unmarshal read request: %s", err)
// }
//
// resp := &prompb.ReadResponse{
// Results: make([]*prompb.QueryResult, len(req.Queries)),
// }
//
// for i, r := range req.Queries {
// startTs := r.StartTimestampMs
// endTs := r.EndTimestampMs
// ts := make([]*prompb.TimeSeries, len(rrs.series))
// for i, s := range rrs.series {
// var samples []prompb.Sample
// for _, sample := range s.Samples {
// if sample.Timestamp >= startTs && sample.Timestamp < endTs {
// samples = append(samples, sample)
// }
// }
// var series prompb.TimeSeries
// if len(samples) > 0 {
// series.Labels = s.Labels
// series.Samples = samples
// }
// ts[i] = &series
// }
//
// resp.Results[i] = &prompb.QueryResult{Timeseries: ts}
// data, err := proto.Marshal(resp)
// if err != nil {
// t.Fatalf("error marshal response: %s", err)
// }
//
// compressed = snappy.Encode(nil, data)
//
// w.Header().Set("Content-Type", "application/x-protobuf")
// w.Header().Set("Content-Encoding", "snappy")
// w.WriteHeader(http.StatusOK)
//
// if _, err := w.Write(compressed); err != nil {
// t.Fatalf("snappy encode error: %s", err)
// }
// }
// })
// }
//
// func NewRemoteReadStreamServer(t *testing.T) *RemoteReadServer {
// rrs := &RemoteReadServer{
// series: make([]*prompb.TimeSeries, 0),
// }
// rrs.server = httptest.NewServer(rrs.getStreamReadHandler(t))
// return rrs
// }
//
// func (rrs *RemoteReadServer) InitMockStorage(series []*prompb.TimeSeries) {
// rrs.storage = NewMockStorage(series)
// }
//
// func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// if !validateStreamReadHeaders(t, r) {
// t.Fatalf("invalid read headers")
// }
//
// f, ok := w.(http.Flusher)
// if !ok {
// t.Fatalf("internal http.ResponseWriter does not implement http.Flusher interface")
// }
//
// stream := remote.NewChunkedWriter(w, f)
//
// data, err := io.ReadAll(r.Body)
// if err != nil {
// t.Fatalf("error read body: %s", err)
// }
//
// decodedData, err := snappy.Decode(nil, data)
// if err != nil {
// t.Fatalf("error decode compressed data:%s", err)
// }
//
// var req prompb.ReadRequest
// if err := proto.Unmarshal(decodedData, &req); err != nil {
// t.Fatalf("error unmarshal read request: %s", err)
// }
//
// var chks []prompb.Chunk
// ctx := context.Background()
// for idx, r := range req.Queries {
// startTs := r.StartTimestampMs
// endTs := r.EndTimestampMs
//
// var matchers []*labels.Matcher
// cb := func() (int64, error) { return 0, nil }
//
// c := remote.NewSampleAndChunkQueryableClient(rrs.storage, nil, matchers, true, cb)
//
// q, err := c.ChunkQuerier(startTs, endTs)
// if err != nil {
// t.Fatalf("error init chunk querier: %s", err)
// }
//
// ss := q.Select(ctx, false, nil, matchers...)
// var iter chunks.Iterator
// for ss.Next() {
// series := ss.At()
// iter = series.Iterator(iter)
// labels := remote.MergeLabels(labelsToLabelsProto(series.Labels()), nil)
//
// frameBytesLeft := maxBytesInFrame
// for _, lb := range labels {
// frameBytesLeft -= lb.Size()
// }
//
// isNext := iter.Next()
//
// for isNext {
// chunk := iter.At()
//
// if chunk.Chunk == nil {
// t.Fatalf("error found not populated chunk returned by SeriesSet at ref: %v", chunk.Ref)
// }
//
// chks = append(chks, prompb.Chunk{
// MinTimeMs: chunk.MinTime,
// MaxTimeMs: chunk.MaxTime,
// Type: prompb.Chunk_Encoding(chunk.Chunk.Encoding()),
// Data: chunk.Chunk.Bytes(),
// })
//
// frameBytesLeft -= chks[len(chks)-1].Size()
//
// // We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size.
// isNext = iter.Next()
// if frameBytesLeft > 0 && isNext {
// continue
// }
//
// resp := &prompb.ChunkedReadResponse{
// ChunkedSeries: []*prompb.ChunkedSeries{
// {Labels: labels, Chunks: chks},
// },
// QueryIndex: int64(idx),
// }
//
// b, err := proto.Marshal(resp)
// if err != nil {
// t.Fatalf("error marshal response: %s", err)
// }
//
// if _, err := stream.Write(b); err != nil {
// t.Fatalf("error write to stream: %s", err)
// }
// chks = chks[:0]
// rrs.storage.Reset()
// }
// if err := iter.Err(); err != nil {
// t.Fatalf("error iterate over chunk series: %s", err)
// }
// }
// }
// })
// }
//
// func validateReadHeaders(t *testing.T, r *http.Request) bool {
// if r.Method != http.MethodPost {
// t.Fatalf("got %q method, expected %q", r.Method, http.MethodPost)
// }
// if r.Header.Get("Content-Encoding") != "snappy" {
// t.Fatalf("got %q content encoding header, expected %q", r.Header.Get("Content-Encoding"), "snappy")
// }
// if r.Header.Get("Content-Type") != "application/x-protobuf" {
// t.Fatalf("got %q content type header, expected %q", r.Header.Get("Content-Type"), "application/x-protobuf")
// }
//
// remoteReadVersion := r.Header.Get("X-Prometheus-Remote-Read-Version")
// if remoteReadVersion == "" {
// t.Fatalf("got empty prometheus remote read header")
// }
// if !strings.HasPrefix(remoteReadVersion, "0.1.") {
// t.Fatalf("wrong remote version defined")
// }
//
// return true
// }
//
// func validateStreamReadHeaders(t *testing.T, r *http.Request) bool {
// if r.Method != http.MethodPost {
// t.Fatalf("got %q method, expected %q", r.Method, http.MethodPost)
// }
// if r.Header.Get("Content-Encoding") != "snappy" {
// t.Fatalf("got %q content encoding header, expected %q", r.Header.Get("Content-Encoding"), "snappy")
// }
// if r.Header.Get("Content-Type") != "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" {
// t.Fatalf("got %q content type header, expected %q", r.Header.Get("Content-Type"), "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
// }
//
// remoteReadVersion := r.Header.Get("X-Prometheus-Remote-Read-Version")
// if remoteReadVersion == "" {
// t.Fatalf("got empty prometheus remote read header")
// }
// if !strings.HasPrefix(remoteReadVersion, "0.1.") {
// t.Fatalf("wrong remote version defined")
// }
// return true
// }
//
// func GenerateRemoteReadSeries(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries {
// var ts []*prompb.TimeSeries
// j := 0
// for i := 0; i < int(numOfSeries); i++ {
// if i%3 == 0 {
// j++
// }
//
// timeSeries := prompb.TimeSeries{
// Labels: []prompb.Label{
// {Name: labels.MetricName, Value: fmt.Sprintf("vm_metric_%d", j)},
// {Name: "job", Value: strconv.Itoa(i)},
// },
// }
//
// ts = append(ts, &timeSeries)
// }
//
// for i := range ts {
// ts[i].Samples = generateRemoteReadSamples(i, start, end, numOfSamples)
// }
//
// return ts
// }
//
// func generateRemoteReadSamples(idx int, startTime, endTime, numOfSamples int64) []prompb.Sample {
// samples := make([]prompb.Sample, 0)
// delta := (endTime - startTime) / numOfSamples
//
// t := startTime
// for t < endTime { // "<" avoids an infinite loop when delta does not divide the range evenly
// v := 100 * int64(idx)
// samples = append(samples, prompb.Sample{
// Timestamp: t * 1000,
// Value: float64(v),
// })
// t = t + delta
// }
//
// return samples
// }
//
// type MockStorage struct {
// query *prompb.Query
// store []*prompb.TimeSeries
// }
//
// func NewMockStorage(series []*prompb.TimeSeries) *MockStorage {
// return &MockStorage{store: series}
// }
//
// func (ms *MockStorage) Read(_ context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
// if ms.query != nil {
// return nil, fmt.Errorf("expected only one call to remote client got: %v", query)
// }
// ms.query = query
//
// q := &prompb.QueryResult{Timeseries: make([]*prompb.TimeSeries, 0, len(ms.store))}
// for _, s := range ms.store {
// var samples []prompb.Sample
// for _, sample := range s.Samples {
// if sample.Timestamp >= query.StartTimestampMs && sample.Timestamp < query.EndTimestampMs {
// samples = append(samples, sample)
// }
// }
// var series prompb.TimeSeries
// if len(samples) > 0 {
// series.Labels = s.Labels
// series.Samples = samples
// }
//
// q.Timeseries = append(q.Timeseries, &series)
// }
// return q, nil
// }
//
// func (ms *MockStorage) Reset() {
// ms.query = nil
// }
//
// func labelsToLabelsProto(labels labels.Labels) []prompb.Label {
// result := make([]prompb.Label, 0, len(labels))
// for _, l := range labels {
// result = append(result, prompb.Label{
// Name: l.Name,
// Value: l.Value,
// })
// }
// return result
// }

View File

@ -17,7 +17,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
@ -214,15 +214,15 @@ func processFlags() {
func fillStorage(series []vm.TimeSeries) error {
var mrs []storage.MetricRow
for _, series := range series {
var labels []prompb.Label
var labels []prompbmarshal.Label
for _, lp := range series.LabelPairs {
labels = append(labels, prompb.Label{
labels = append(labels, prompbmarshal.Label{
Name: lp.Name,
Value: lp.Value,
})
}
if series.Name != "" {
labels = append(labels, prompb.Label{
labels = append(labels, prompbmarshal.Label{
Name: "__name__",
Value: series.Name,
})

View File

@ -8,7 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
@ -30,7 +30,7 @@ type InsertCtx struct {
func (ctx *InsertCtx) Reset(rowsLen int) {
labels := ctx.Labels
for i := range labels {
labels[i] = prompb.Label{}
labels[i] = prompbmarshal.Label{}
}
ctx.Labels = labels[:0]
@ -51,7 +51,7 @@ func cleanMetricRow(mr *storage.MetricRow) {
mr.MetricNameRaw = nil
}
func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompb.Label) []byte {
func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompbmarshal.Label) []byte {
start := len(ctx.metricNamesBuf)
ctx.metricNamesBuf = append(ctx.metricNamesBuf, prefix...)
ctx.metricNamesBuf = storage.MarshalMetricNameRaw(ctx.metricNamesBuf, labels)
@ -60,7 +60,7 @@ func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompb.Label)
}
// WriteDataPoint writes (timestamp, value) with the given prefix and labels into ctx buffer.
func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompb.Label, timestamp int64, value float64) error {
func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompbmarshal.Label, timestamp int64, value float64) error {
metricNameRaw := ctx.marshalMetricNameRaw(prefix, labels)
return ctx.addRow(metricNameRaw, timestamp, value)
}
@ -68,7 +68,7 @@ func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompb.Label, times
// WriteDataPointExt writes (timestamp, value) with the given metricNameRaw and labels into ctx buffer.
//
// It returns metricNameRaw for the given labels if len(metricNameRaw) == 0.
func (ctx *InsertCtx) WriteDataPointExt(metricNameRaw []byte, labels []prompb.Label, timestamp int64, value float64) ([]byte, error) {
func (ctx *InsertCtx) WriteDataPointExt(metricNameRaw []byte, labels []prompbmarshal.Label, timestamp int64, value float64) ([]byte, error) {
if len(metricNameRaw) == 0 {
metricNameRaw = ctx.marshalMetricNameRaw(nil, labels)
}
@ -106,7 +106,7 @@ func (ctx *InsertCtx) AddLabelBytes(name, value []byte) {
// Do not skip labels with empty name, since they are equal to __name__.
return
}
ctx.Labels = append(ctx.Labels, prompb.Label{
ctx.Labels = append(ctx.Labels, prompbmarshal.Label{
// Do not copy name and value contents for performance reasons.
// This reduces GC overhead on the number of objects and allocations.
Name: bytesutil.ToUnsafeString(name),
@ -124,7 +124,7 @@ func (ctx *InsertCtx) AddLabel(name, value string) {
// Do not skip labels with empty name, since they are equal to __name__.
return
}
ctx.Labels = append(ctx.Labels, prompb.Label{
ctx.Labels = append(ctx.Labels, prompbmarshal.Label{
// Do not copy name and value contents for performance reasons.
// This reduces GC overhead on the number of objects and allocations.
Name: name,
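
The InsertCtx hunks above swap the label type from `prompb.Label` to `prompbmarshal.Label` without changing the write path itself. A minimal Go sketch of how a protocol parser drives this API, assuming it runs inside an initialized vminsert process; the final flush step is named per the vminsert code base and should be treated as an assumption here:

```go
package main

import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
)

func main() {
	var ctx common.InsertCtx
	ctx.Reset(1) // reserve room for a single row

	// Labels are appended without copying name/value contents.
	ctx.AddLabel("__name__", "demo_metric")
	ctx.AddLabel("job", "selfscrape")
	ctx.SortLabelsIfNeeded() // no-op unless -sortLabels is set

	ts := time.Now().UnixMilli()
	if err := ctx.WriteDataPoint(nil, ctx.Labels, ts, 42); err != nil {
		panic(err)
	}
	// A real handler would flush the buffered rows afterwards
	// (vminsert does this via ctx.FlushBufs()).
}
```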

View File

@ -4,7 +4,7 @@ import (
"flag"
"sort"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
var sortLabels = flag.Bool("sortLabels", false, `Whether to sort labels for incoming samples before writing them to storage. `+
@ -19,7 +19,7 @@ func (ctx *InsertCtx) SortLabelsIfNeeded() {
}
}
type sortedLabels []prompb.Label
type sortedLabels []prompbmarshal.Label
func (sl *sortedLabels) Len() int { return len(*sl) }
func (sl *sortedLabels) Less(i, j int) bool {

View File

@ -9,7 +9,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
@ -150,7 +149,7 @@ type pushCtx struct {
Common common.InsertCtx
metricNameBuf []byte
metricGroupBuf []byte
originLabels []prompb.Label
originLabels []prompbmarshal.Label
}
func (ctx *pushCtx) reset() {
@ -160,7 +159,7 @@ func (ctx *pushCtx) reset() {
originLabels := ctx.originLabels
for i := range originLabels {
originLabels[i] = prompb.Label{}
originLabels[i] = prompbmarshal.Label{}
}
ctx.originLabels = originLabels[:0]
}

View File

@ -8,7 +8,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/metrics"
@ -108,7 +107,7 @@ func (ctx *Ctx) Reset() {
// ApplyRelabeling applies relabeling to the given labels and returns the result.
//
// The returned labels are valid until the next call to ApplyRelabeling.
func (ctx *Ctx) ApplyRelabeling(labels []prompb.Label) []prompb.Label {
func (ctx *Ctx) ApplyRelabeling(labels []prompbmarshal.Label) []prompbmarshal.Label {
pcs := pcsGlobal.Load()
if pcs.Len() == 0 && !*usePromCompatibleNaming {
// There are no relabeling rules.
@ -159,7 +158,7 @@ func (ctx *Ctx) ApplyRelabeling(labels []prompb.Label) []prompb.Label {
name = ""
}
value := label.Value
dst = append(dst, prompb.Label{
dst = append(dst, prompbmarshal.Label{
Name: name,
Value: value,
})
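
Since `ApplyRelabeling` reuses an internal buffer, its result must be copied before the next call if it needs to outlive the current iteration. A sketch under the assumptions that the relabel package has been initialized at startup (as vminsert does) and that the zero value of `Ctx` is usable:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

func main() {
	var rctx relabel.Ctx // assumption: zero value is ready to use
	labels := []prompbmarshal.Label{
		{Name: "__name__", Value: "http_requests_total"},
		{Name: "job", Value: "demo"},
	}
	labels = rctx.ApplyRelabeling(labels)

	// The slice aliases rctx's internal buffer and is only valid
	// until the next ApplyRelabeling call, so copy it to retain it.
	kept := append([]prompbmarshal.Label(nil), labels...)
	fmt.Println(kept)
}
```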

View File

@ -14,7 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
graphiteparser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
@ -95,7 +95,7 @@ func registerMetrics(startTime time.Time, w http.ResponseWriter, r *http.Request
_ = deadline // TODO: use the deadline as in the cluster branch
paths := r.Form["path"]
var row graphiteparser.Row
var labels []prompb.Label
var labels []prompbmarshal.Label
var b []byte
var tagsPool []graphiteparser.Tag
mrs := make([]storage.MetricRow, len(paths))
@ -122,12 +122,12 @@ func registerMetrics(startTime time.Time, w http.ResponseWriter, r *http.Request
canonicalPaths[i] = string(b)
// Convert parsed metric and tags to labels.
labels = append(labels[:0], prompb.Label{
labels = append(labels[:0], prompbmarshal.Label{
Name: "__name__",
Value: row.Metric,
})
for _, tag := range row.Tags {
labels = append(labels, prompb.Label{
labels = append(labels, prompbmarshal.Label{
Name: tag.Key,
Value: tag.Value,
})

View File

@ -9398,6 +9398,18 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r1, r2, r3, r4}
f(q, resultExpected)
})
t.Run("nan^any", func(t *testing.T) {
t.Parallel()
q := `(hour(time()*1e4) == 4)^1`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{nan, nan, nan, 4, nan, nan},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
}
func TestExecError(t *testing.T) {

View File

@ -37,7 +37,7 @@ type panelSettings struct {
Unit string `json:"unit,omitempty"`
Expr []string `json:"expr"`
Alias []string `json:"alias,omitempty"`
ShowLegend bool `json:"showLegend,omitempty"`
ShowLegend *bool `json:"showLegend"`
Width int `json:"width,omitempty"`
}
@ -107,6 +107,17 @@ func collectDashboardsSettings(path string) ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("cannot parse file %s: %w", filePath, err)
}
for i := range ds.Rows {
for j := range ds.Rows[i].Panels {
// Set default value for ShowLegend = true if it is not specified
if ds.Rows[i].Panels[j].ShowLegend == nil {
defaultValue := true
ds.Rows[i].Panels[j].ShowLegend = &defaultValue
}
}
}
if len(ds.Rows) > 0 {
dss = append(dss, ds)
}
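
Changing `ShowLegend` from `bool` to `*bool` is what makes the default possible: with a plain `bool`, an omitted field and an explicit `false` both decode to `false`. A self-contained sketch of the pattern:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type panelSettings struct {
	Title      string `json:"title,omitempty"`
	ShowLegend *bool  `json:"showLegend"` // pointer distinguishes "absent" from explicit false
}

func main() {
	var p panelSettings
	_ = json.Unmarshal([]byte(`{"title":"cpu"}`), &p)
	if p.ShowLegend == nil { // field omitted -> apply the default
		defaultValue := true
		p.ShowLegend = &defaultValue
	}
	fmt.Println(*p.ShowLegend) // true
}
```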

View File

@ -1,13 +1,13 @@
{
"files": {
"main.css": "./static/css/main.d781989c.css",
"main.js": "./static/js/main.a7037969.js",
"main.css": "./static/css/main.b1929c64.css",
"main.js": "./static/js/main.a7d57628.js",
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.d781989c.css",
"static/js/main.a7037969.js"
"static/css/main.b1929c64.css",
"static/js/main.a7d57628.js"
]
}

View File

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.a7037969.js"></script><link href="./static/css/main.d781989c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.a7d57628.js"></script><link href="./static/css/main.b1929c64.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -9,12 +9,12 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/go-kit/kit/log"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/model/labels"
promstorage "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"log/slog"
)
var prometheusDataPath = flag.String("prometheusDataPath", "", "Optional path to readonly historical Prometheus data")
@ -32,10 +32,7 @@ func Init(retentionMsecs int64) {
if *prometheusDataPath == "" {
return
}
l := log.LoggerFunc(func(a ...interface{}) error {
logger.Infof("%v", a)
return nil
})
l := slog.New(slog.Default().Handler())
opts := tsdb.DefaultOptions()
opts.RetentionDuration = retentionMsecs
@ -110,7 +107,7 @@ func GetLabelNamesOnTimeRange(tr storage.TimeRange, deadline searchutils.Deadlin
}
defer mustCloseQuerier(q)
names, _, err := q.LabelNames(ctx)
names, _, err := q.LabelNames(ctx, nil)
// Make full copy of names, since they cannot be used after q is closed.
names = copyStringsWithMemory(names)
return names, err
@ -130,7 +127,7 @@ func GetLabelValuesOnTimeRange(labelName string, tr storage.TimeRange, deadline
}
defer mustCloseQuerier(q)
values, _, err := q.LabelValues(ctx, labelName)
values, _, err := q.LabelValues(ctx, labelName, nil)
// Make full copy of values, since they cannot be used after q is closed.
values = copyStringsWithMemory(values)
return values, err
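
The hunk above replaces the go-kit `log.LoggerFunc` adapter with `log/slog`, which Prometheus' tsdb package now expects. If records still need to be routed into an existing printf-style logger, a custom `slog.Handler` can play the role the `LoggerFunc` used to. A stdlib-only sketch (`funcHandler` is a hypothetical name, not code from this repository):

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"strings"
)

// funcHandler forwards every slog record to a printf-style function.
type funcHandler struct {
	logf  func(format string, args ...any)
	attrs []slog.Attr
}

func (h *funcHandler) Enabled(context.Context, slog.Level) bool { return true }

func (h *funcHandler) Handle(_ context.Context, r slog.Record) error {
	var sb strings.Builder
	sb.WriteString(r.Message)
	emit := func(a slog.Attr) bool {
		fmt.Fprintf(&sb, " %s=%v", a.Key, a.Value)
		return true
	}
	for _, a := range h.attrs {
		emit(a)
	}
	r.Attrs(emit) // per-call attributes
	h.logf("%s", sb.String())
	return nil
}

func (h *funcHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &funcHandler{logf: h.logf, attrs: append(h.attrs[:len(h.attrs):len(h.attrs)], attrs...)}
}

func (h *funcHandler) WithGroup(string) slog.Handler { return h }

func main() {
	l := slog.New(&funcHandler{logf: func(f string, a ...any) { fmt.Printf(f+"\n", a...) }})
	l.Info("opening prometheus data path", "retention_msecs", 86400000)
}
```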

View File

@ -127,15 +127,27 @@ DashboardRow:
<br/>
PanelSettings:
| Name | Type | Description |
|:------------|:----------:|--------------------------------------------------------------------------------------:|
| expr* | `string[]` | Data source queries |
| alias | `string[]` | Expression alias. Matched by index in array |
| title | `string` | Panel title |
| description | `string` | Additional information about the panel |
| unit | `string` | Y-axis unit |
| showLegend | `boolean` | If `false`, the legend hide. Default value - `true` |
| width | `number` | The number of columns the panel uses.<br/> From 1 (minimum width) to 12 (full width). |
| Name | Type | Description |
|:------------|:----------:|---------------------------------------------------------------------------------------------------------------:|
| expr* | `string[]` | Data source queries |
| alias | `string[]` | An array of aliases for each expression in `expr`. See [Template Support in alias](#template-support-in-alias) |
| title | `string` | Panel title |
| description | `string` | Additional information about the panel |
| unit | `string` | Y-axis unit |
| showLegend  | `boolean`  | If `false`, the legend is hidden. Default value is `true`                                                        |
| width | `number` | The number of columns the panel uses.<br/> From 1 (minimum width) to 12 (full width). |
### Template Support in `alias`
To create more readable metric names in the legend, you can use placeholders like `{{label_name}}`, where `label_name`
is the name of a label.
If the label exists in the metric, its value will be substituted in the template.
If the label is missing, the legend will use the default name.
**Example:**
Metric: `metric{foo="bar",baz="qux"}`
Alias: `{{foo}} - {{baz}}`
Legend: `bar - qux`
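
The substitution rule can be stated precisely: replace each `{{label_name}}` whose label exists, and fall back to the default legend name if any referenced label is missing. A Go sketch of that rule (`renderAlias` is a hypothetical helper, not vmui's implementation):

```go
package main

import (
	"fmt"
	"regexp"
)

var placeholderRe = regexp.MustCompile(`\{\{(\w+)\}\}`)

// renderAlias substitutes {{label_name}} placeholders with label values.
// If any referenced label is missing, ok is false and the caller should
// fall back to the default legend name.
func renderAlias(alias string, labels map[string]string) (result string, ok bool) {
	ok = true
	result = placeholderRe.ReplaceAllStringFunc(alias, func(m string) string {
		name := placeholderRe.FindStringSubmatch(m)[1]
		v, found := labels[name]
		if !found {
			ok = false
			return m
		}
		return v
	})
	return result, ok
}

func main() {
	labels := map[string]string{"foo": "bar", "baz": "qux"}
	s, _ := renderAlias("{{foo}} - {{baz}}", labels)
	fmt.Println(s) // bar - qux
}
```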
### Example json

View File

@ -20,6 +20,7 @@ module.exports = {
"@typescript-eslint"
],
"rules": {
"@typescript-eslint/no-unused-vars": ["warn", { "varsIgnorePattern": "^_" }],
"react/jsx-closing-bracket-location": [1, "line-aligned"],
"react/jsx-max-props-per-line":[1, { "maximum": 1 }],
"react/jsx-first-prop-new-line": [1, "multiline"],

View File

@ -17,6 +17,7 @@ import ActiveQueries from "./pages/ActiveQueries";
import QueryAnalyzer from "./pages/QueryAnalyzer";
import DownsamplingFilters from "./pages/DownsamplingFilters";
import RetentionFilters from "./pages/RetentionFilters";
import RawQueryPage from "./pages/RawQueryPage";
const App: FC = () => {
const [loadedTheme, setLoadedTheme] = useState(false);
@ -36,6 +37,10 @@ const App: FC = () => {
path={router.home}
element={<CustomPanel/>}
/>
<Route
path={router.rawQuery}
element={<RawQueryPage/>}
/>
<Route
path={router.metrics}
element={<ExploreMetrics/>}

View File

@ -5,3 +5,13 @@ export const getQueryRangeUrl = (server: string, query: string, period: TimePara
export const getQueryUrl = (server: string, query: string, period: TimeParams, nocache: boolean, queryTracing: boolean): string =>
`${server}/api/v1/query?query=${encodeURIComponent(query)}&time=${period.end}&step=${period.step}${nocache ? "&nocache=1" : ""}${queryTracing ? "&trace=1" : ""}`;
export const getExportDataUrl = (server: string, query: string, period: TimeParams, reduceMemUsage: boolean): string => {
const params = new URLSearchParams({
"match[]": query,
start: period.start.toString(),
end: period.end.toString(),
});
if (reduceMemUsage) params.set("reduce_mem_usage", "1");
return `${server}/api/v1/export?${params}`;
};
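
For reference, the same export URL can be assembled server-side with `net/url`; a sketch mirroring the parameters used above:

```go
package main

import (
	"fmt"
	"net/url"
)

// exportDataURL mirrors vmui's getExportDataUrl: it builds an
// /api/v1/export request for the given series selector and time range.
// Parameter names follow the hunk above; treat this as a sketch.
func exportDataURL(server, query string, start, end int64, reduceMemUsage bool) string {
	params := url.Values{}
	params.Set("match[]", query)
	params.Set("start", fmt.Sprintf("%d", start))
	params.Set("end", fmt.Sprintf("%d", end))
	if reduceMemUsage {
		params.Set("reduce_mem_usage", "1")
	}
	return server + "/api/v1/export?" + params.Encode()
}

func main() {
	fmt.Println(exportDataURL("http://localhost:8428", `up{job="node"}`, 1700000000, 1700003600, true))
}
```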

View File

@ -15,6 +15,11 @@ export interface InstantMetricResult extends MetricBase {
values?: [number, string][]
}
export interface ExportMetricResult extends MetricBase {
values: number[];
timestamps: number[];
}
export interface TracingData {
message: string;
duration_msec: number;

View File

@ -56,19 +56,23 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap, isAnomal
)}
<div className="vm-legend-item-info">
<span className="vm-legend-item-info__label">
{legend.freeFormFields["__name__"]}
{!!freeFormFields.length && <>&#123;</>}
{freeFormFields.map((f, i) => (
<span
className="vm-legend-item-info__free-fields"
key={f.key}
onClick={createHandlerCopy(f.freeField)}
title="copy to clipboard"
>
{f.freeField}{i + 1 < freeFormFields.length && ","}
</span>
))}
{!!freeFormFields.length && <>&#125;</>}
{legend.hasAlias ? legend.label : (
<>
{legend.freeFormFields["__name__"]}
{!!freeFormFields.length && <>&#123;</>}
{freeFormFields.map((f, i) => (
<span
className="vm-legend-item-info__free-fields"
key={f.key}
onClick={createHandlerCopy(f.freeField)}
title="copy to clipboard"
>
{f.freeField}{i + 1 < freeFormFields.length && ","}
</span>
))}
{!!freeFormFields.length && <>&#125;</>}
</>
)}
</span>
</div>
{!isHeatmap && showStats && (

View File

@ -20,13 +20,17 @@ const AdditionalSettingsControls: FC<Props & {isMobile?: boolean}> = ({ isMobile
const { autocomplete } = useQueryState();
const queryDispatch = useQueryDispatch();
const { nocache, isTracingEnabled } = useCustomPanelState();
const { nocache, isTracingEnabled, reduceMemUsage } = useCustomPanelState();
const customPanelDispatch = useCustomPanelDispatch();
const onChangeCache = () => {
customPanelDispatch({ type: "TOGGLE_NO_CACHE" });
};
const onChangeReduceMemUsage = () => {
customPanelDispatch({ type: "TOGGLE_REDUCE_MEM_USAGE" });
};
const onChangeQueryTracing = () => {
customPanelDispatch({ type: "TOGGLE_QUERY_TRACING" });
};
@ -67,12 +71,22 @@ const AdditionalSettingsControls: FC<Props & {isMobile?: boolean}> = ({ isMobile
/>
</Tooltip>
)}
<Switch
label={"Disable cache"}
value={nocache}
onChange={onChangeCache}
fullWidth={isMobile}
/>
{!hideButtons?.disableCache && (
<Switch
label={"Disable cache"}
value={nocache}
onChange={onChangeCache}
fullWidth={isMobile}
/>
)}
{!hideButtons?.reduceMemUsage && (
<Switch
label={"Disable deduplication"}
value={reduceMemUsage}
onChange={onChangeReduceMemUsage}
fullWidth={isMobile}
/>
)}
{!hideButtons?.traceQuery && (
<Switch
label={"Trace query"}

View File

@ -23,6 +23,7 @@ export interface QueryEditorProps {
stats?: QueryStats;
label: string;
disabled?: boolean
includeFunctions?: boolean;
}
const QueryEditor: FC<QueryEditorProps> = ({
@ -35,7 +36,8 @@ const QueryEditor: FC<QueryEditorProps> = ({
error,
stats,
label,
disabled = false
disabled = false,
includeFunctions = true
}) => {
const { autocompleteQuick } = useQueryState();
const { isMobile } = useDeviceDetect();
@ -143,6 +145,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
anchorEl={autocompleteAnchorEl}
caretPosition={caretPosition}
hasHelperText={Boolean(warning || error)}
includeFunctions={includeFunctions}
onSelect={handleSelect}
onFoundOptions={handleChangeFoundOptions}
/>

View File

@ -11,6 +11,7 @@ interface QueryEditorAutocompleteProps {
anchorEl: React.RefObject<HTMLElement>;
caretPosition: [number, number]; // [start, end]
hasHelperText: boolean;
includeFunctions: boolean;
onSelect: (val: string, caretPosition: number) => void;
onFoundOptions: (val: AutocompleteOptions[]) => void;
}
@ -20,11 +21,12 @@ const QueryEditorAutocomplete: FC<QueryEditorAutocompleteProps> = ({
anchorEl,
caretPosition,
hasHelperText,
includeFunctions,
onSelect,
onFoundOptions
}) => {
const [offsetPos, setOffsetPos] = useState({ top: 0, left: 0 });
const metricsqlFunctions = useGetMetricsQL();
const metricsqlFunctions = useGetMetricsQL(includeFunctions);
const values = useMemo(() => {
if (caretPosition[0] !== caretPosition[1]) return { beforeCursor: value, afterCursor: "" };

View File

@ -0,0 +1,105 @@
import React from "react";
import { ArrowDownIcon } from "../Icons";
import { useMemo } from "preact/compat";
import classNames from "classnames";
import "./style.scss";
interface PaginationProps {
currentPage: number;
totalItems: number;
itemsPerPage: number;
onPageChange: (page: number) => void;
maxVisiblePages?: number;
}
const Pagination: React.FC<PaginationProps> = ({
currentPage,
totalItems,
itemsPerPage,
onPageChange,
maxVisiblePages = 10
}) => {
const totalPages = Math.ceil(totalItems / itemsPerPage);
const handlePageChange = (page: number) => {
if (page < 1 || page > totalPages) return;
onPageChange(page);
};
const pages = useMemo(() => {
const pages = [];
if (totalPages <= maxVisiblePages) {
for (let i = 1; i <= totalPages; i++) {
pages.push(i);
}
} else {
const startPage = Math.max(1, currentPage - Math.floor(maxVisiblePages / 2));
const endPage = Math.min(totalPages, startPage + maxVisiblePages - 1);
if (startPage > 1) {
pages.push(1);
if (startPage > 2) {
pages.push("...");
}
}
for (let i = startPage; i <= endPage; i++) {
pages.push(i);
}
if (endPage < totalPages) {
if (endPage < totalPages - 1) {
pages.push("...");
}
pages.push(totalPages);
}
}
return pages;
}, [totalPages, currentPage, maxVisiblePages]);
const handleClickNav = (stepPage: number) => () => {
handlePageChange(currentPage + stepPage);
};
const handleClickPage = (page: number | string) => () => {
if (typeof page === "number") {
handlePageChange(page);
}
};
if (pages.length <= 1) return null;
return (
<div className="vm-pagination">
<button
className="vm-pagination__page vm-pagination__arrow vm-pagination__arrow_prev"
onClick={handleClickNav(-1)}
disabled={currentPage === 1}
>
<ArrowDownIcon/>
</button>
{pages.map((page, index) => (
<button
key={index}
onClick={handleClickPage(page)}
className={classNames({
"vm-pagination__page": true,
"vm-pagination__page_active": currentPage === page,
"vm-pagination__page_disabled": page === "..."
})}
disabled={page === "..."}
>
{page}
</button>
))}
<button
className="vm-pagination__page vm-pagination__arrow vm-pagination__arrow_next"
onClick={handleClickNav(1)}
disabled={currentPage === totalPages}
>
<ArrowDownIcon/>
</button>
</div>
);
};
export default Pagination;
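
The page-list computation is the only non-trivial part of the component: it windows up to `maxVisiblePages` numbers around the current page and collapses the rest into ellipsis markers. The same algorithm as a runnable Go sketch:

```go
package main

import "fmt"

// visiblePages mirrors the windowing logic in the Pagination component
// above: list up to maxVisible page numbers around currentPage and
// collapse the remainder into "..." markers.
func visiblePages(currentPage, totalPages, maxVisible int) []string {
	var pages []string
	if totalPages <= maxVisible {
		for i := 1; i <= totalPages; i++ {
			pages = append(pages, fmt.Sprint(i))
		}
		return pages
	}
	start := currentPage - maxVisible/2
	if start < 1 {
		start = 1
	}
	end := start + maxVisible - 1
	if end > totalPages {
		end = totalPages
	}
	if start > 1 {
		pages = append(pages, "1")
		if start > 2 {
			pages = append(pages, "...")
		}
	}
	for i := start; i <= end; i++ {
		pages = append(pages, fmt.Sprint(i))
	}
	if end < totalPages {
		if end < totalPages-1 {
			pages = append(pages, "...")
		}
		pages = append(pages, fmt.Sprint(totalPages))
	}
	return pages
}

func main() {
	fmt.Println(visiblePages(7, 20, 10)) // [1 2 3 4 5 6 7 8 9 10 11 ... 20]
}
```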

View File

@ -1,52 +0,0 @@
import React, { FC } from "preact/compat";
import Button from "../../Button/Button";
import { ArrowDownIcon } from "../../Icons";
import "./style.scss";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import classNames from "classnames";
interface PaginationControlProps {
page: number;
length: number;
limit: number;
onChange: (page: number) => void;
}
const PaginationControl: FC<PaginationControlProps> = ({ page, length, limit, onChange }) => {
const { isMobile } = useDeviceDetect();
const handleChangePage = (step: number) => () => {
onChange(+page + step);
window.scrollTo(0, 0);
};
return (
<div
className={classNames({
"vm-pagination": true,
"vm-pagination_mobile": isMobile
})}
>
{page > 1 && (
<Button
variant={"text"}
onClick={handleChangePage(-1)}
startIcon={<div className="vm-pagination__icon vm-pagination__icon_prev"><ArrowDownIcon/></div>}
>
Previous
</Button>
)}
{length >= limit && (
<Button
variant={"text"}
onClick={handleChangePage(1)}
endIcon={<div className="vm-pagination__icon vm-pagination__icon_next"><ArrowDownIcon/></div>}
>
Next
</Button>
)}
</div>
);
};
export default PaginationControl;

View File

@ -1,24 +0,0 @@
@use "src/styles/variables" as *;
.vm-pagination {
position: sticky;
right: 0;
display: flex;
justify-content: flex-end;
gap: $padding-small;
padding: $padding-global 0 0;
&_mobile {
padding: $padding-global 0;
}
&__icon {
&_prev {
transform: rotate(90deg);
}
&_next {
transform: rotate(-90deg);
}
}
}

View File

@ -0,0 +1,66 @@
@use "../../../styles/variables" as *;
.vm-pagination {
position: sticky;
left: 0;
display: flex;
justify-content: center;
gap: $padding-small;
padding: $padding-global 0;
font-size: $font-size;
&_mobile {
padding: $padding-global 0;
}
&__page {
display: flex;
align-items: center;
justify-content: center;
height: 30px;
min-width: 30px;
color: $color-text;
padding: 0 $padding-small;
border-radius: $border-radius-small;
transition: background-color 0.3s;
border: 1px solid transparent;
cursor: pointer;
&_active {
background-color: $color-primary;
color: $color-primary-text;
}
&_disabled {
cursor: default;
pointer-events: none;
color: $color-text-disabled;
}
&:hover {
background-color: $color-hover-black;
}
}
&__arrow {
svg {
max-width: $font-size;
max-height: $font-size;
}
&:disabled {
color: $color-text-disabled;
cursor: default;
pointer-events: none;
}
&_prev {
transform: rotate(90deg);
}
&_next {
transform: rotate(-90deg);
}
}
}

View File

@ -32,8 +32,7 @@ const Table = <T extends object>({ rows, columns, defaultOrderBy, defaultOrderDi
const sortedList = useMemo(() => {
const { startIndex, endIndex } = paginationOffset;
return stableSort(rows as [], getComparator(orderDir, orderBy)).slice(startIndex, endIndex);
},
[rows, orderBy, orderDir, paginationOffset]);
}, [rows, orderBy, orderDir, paginationOffset]);
const createSortHandler = (key: keyof T) => () => {
setOrderDir((prev) => prev === "asc" && orderBy === key ? "desc" : "asc");

View File

@ -180,7 +180,7 @@ const GraphView: FC<GraphViewProps> = ({
if (isAnomalyView) {
setHideSeries(legend.map(s => s.label || "").slice(1));
}
}, [data, timezone, isHistogram]);
}, [data, timezone, isHistogram, currentStep]);
useEffect(() => {
const tempLegend: LegendItemType[] = [];

View File

@ -12,17 +12,7 @@ export interface JsonViewProps {
const JsonView: FC<JsonViewProps> = ({ data }) => {
const copyToClipboard = useCopyToClipboard();
const formattedJson = useMemo(() => {
const space = " ";
const values = data.map(item => {
if (Object.keys(item).length === 1) {
return JSON.stringify(item);
} else {
return JSON.stringify(item, null, space.length);
}
}).join(",\n").replace(/^/gm, `${space}`);
return `[\n${values}\n]`;
}, [data]);
const formattedJson = useMemo(() => JSON.stringify(data, null, 2), [data]);
const handlerCopy = async () => {
await copyToClipboard(formattedJson, "Formatted JSON has been copied");

View File

@ -64,7 +64,7 @@ const useLineTooltip = ({ u, metrics, series, unit, isAnomalyView }: LineTooltip
title: groups.size > 1 && !isAnomalyView ? `Query ${group}` : "",
dates: [date ? dayjs(date * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT) : "-"],
value: formatPrettyNumber(value, min, max),
info: getMetricName(metricItem),
info: getMetricName(metricItem, seriesItem),
statsFormatted: seriesItem?.statsFormatted,
marker: `${seriesItem?.stroke}`,
};

View File

@ -48,7 +48,7 @@ const processGroups = (groups: NodeListOf<Element>): AutocompleteOptions[] => {
}).filter(Boolean) as AutocompleteOptions[];
};
const useGetMetricsQL = () => {
const useGetMetricsQL = (includeFunctions: boolean) => {
const { metricsQLFunctions } = useQueryState();
const queryDispatch = useQueryDispatch();
@ -60,6 +60,7 @@ const useGetMetricsQL = () => {
};
useEffect(() => {
if (!includeFunctions || metricsQLFunctions.length) return;
const fetchMarkdown = async () => {
try {
const resp = await fetch(MetricsQL);
@ -70,12 +71,10 @@ const useGetMetricsQL = () => {
console.error("Error fetching or processing the MetricsQL.md file:", e);
}
};
if (metricsQLFunctions.length) return;
fetchMarkdown();
}, []);
return metricsQLFunctions;
return includeFunctions ? metricsQLFunctions : [];
};
export default useGetMetricsQL;

View File

@ -17,7 +17,11 @@ export const displayTypeTabs: DisplayTab[] = [
{ value: DisplayType.table, icon: <TableIcon/>, label: "Table", prometheusCode: 1 }
];
export const DisplayTypeSwitch: FC = () => {
interface Props {
tabFilter?: (tab: DisplayTab) => boolean
}
export const DisplayTypeSwitch: FC<Props> = ({ tabFilter }) => {
const { displayType } = useCustomPanelState();
const dispatch = useCustomPanelDispatch();
@ -26,10 +30,12 @@ export const DisplayTypeSwitch: FC = () => {
dispatch({ type: "SET_DISPLAY_TYPE", payload: newValue as DisplayType ?? displayType });
};
const items = displayTypeTabs.filter(tabFilter ?? (() => true));
return (
<Tabs
activeItem={displayType}
items={displayTypeTabs}
items={items}
onChange={handleChange}
/>
);

View File

@ -15,6 +15,8 @@ import Alert from "../../../components/Main/Alert/Alert";
import qs from "qs";
import Popper from "../../../components/Main/Popper/Popper";
import helperText from "./helperText";
import { Link } from "react-router-dom";
import router from "../../../router";
type Props = {
fetchUrl?: string[];
@ -125,6 +127,15 @@ const DownloadReport: FC<Props> = ({ fetchUrl }) => {
setStepHelper(0);
}, [openHelper]);
const RawQueryLink = () => (
<Link
className="vm-link vm-link_underlined vm-link_colored"
to={router.rawQuery}
>
Raw Query
</Link>
);
return (
<>
<Tooltip title={"Export query"}>
@ -165,6 +176,10 @@ const DownloadReport: FC<Props> = ({ fetchUrl }) => {
label={"Include query trace"}
/>
</div>
<Alert variant="info">
If you are confused by the query results,
try viewing the raw samples for the selected series in the <RawQueryLink/> tab.
</Alert>
</div>
{error && <Alert variant="error">{error}</Alert>}
<div className="vm-download-report__buttons">

View File

@ -4,14 +4,15 @@
display: grid;
gap: $padding-large;
padding-top: calc($padding-large - $padding-global);
min-width: 400px;
width: 700px;
max-width: 100%;
&-settings {
display: grid;
gap: $padding-global;
textarea {
min-height: 100px;
min-height: 200px;
}
}

View File

@ -31,7 +31,9 @@ export interface QueryConfiguratorProps {
setQueryErrors: Dispatch<SetStateAction<string[]>>;
setHideError: Dispatch<SetStateAction<boolean>>;
stats: QueryStats[];
label?: string;
isLoading?: boolean;
includeFunctions?: boolean;
onHideQuery?: (queries: number[]) => void
onRunQuery: () => void;
abortFetch?: () => void;
@ -41,6 +43,8 @@ export interface QueryConfiguratorProps {
autocomplete?: boolean;
traceQuery?: boolean;
anomalyConfig?: boolean;
disableCache?: boolean;
reduceMemUsage?: boolean;
}
}
@ -49,7 +53,9 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
setQueryErrors,
setHideError,
stats,
label,
isLoading,
includeFunctions = true,
onHideQuery,
onRunQuery,
abortFetch,
@ -216,8 +222,9 @@ const QueryConfigurator: FC<QueryConfiguratorProps> = ({
onArrowDown={createHandlerArrow(1, i)}
onEnter={handleRunQuery}
onChange={createHandlerChangeQuery(i)}
label={`Query ${stateQuery.length > 1 ? i + 1 : ""}`}
label={`${label || "Query"} ${stateQuery.length > 1 ? i + 1 : ""}`}
disabled={hideQuery.includes(i)}
includeFunctions={includeFunctions}
/>
{onHideQuery && (
<Tooltip title={hideQuery.includes(i) ? "Enable query" : "Disable query"}>

View File

@ -72,6 +72,16 @@ export const useSetQueryParams = () => {
newSearchParams.set(`${group}.tenantID`, tenantId);
}
});
// Remove extra parameters that exceed the request size
const maxIndex = query.length - 1;
Array.from(newSearchParams.keys()).forEach(key => {
const match = key.match(/^g(\d+)\./);
if (match && parseInt(match[1], 10) > maxIndex) {
newSearchParams.delete(key);
}
});
if (isEqualURLSearchParams(newSearchParams, searchParams) || !newSearchParams.size) return;
setSearchParams(newSearchParams);
}, [tenantId, displayType, query, duration, relativeTime, date, step, customStep]);
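
The cleanup above drops stale `g<N>.*` parameters once queries are removed, keyed on the group index encoded in each parameter name. An equivalent Go sketch over `url.Values`:

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
	"strconv"
)

var groupKeyRe = regexp.MustCompile(`^g(\d+)\.`)

// pruneGroupParams drops g<N>.* parameters whose group index exceeds
// the number of active queries, mirroring the hook above.
func pruneGroupParams(params url.Values, queryCount int) {
	for key := range params {
		m := groupKeyRe.FindStringSubmatch(key)
		if m == nil {
			continue
		}
		if idx, err := strconv.Atoi(m[1]); err == nil && idx > queryCount-1 {
			params.Del(key)
		}
	}
}

func main() {
	params := url.Values{"g0.expr": {"up"}, "g1.expr": {"rate(x)"}, "g1.tenantID": {"0"}}
	pruneGroupParams(params, 1)  // only one query remains
	fmt.Println(params.Encode()) // g0.expr=up
}
```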

View File

@ -85,6 +85,7 @@ const CustomPanel: FC = () => {
onHideQuery={handleHideQuery}
onRunQuery={handleRunQuery}
abortFetch={abortFetch}
hideButtons={{ reduceMemUsage: true }}
/>
<CustomPanelTraces
traces={traces}

View File

@ -87,7 +87,14 @@ const ExploreAnomaly: FC = () => {
setHideError={setHideError}
stats={queryStats}
onRunQuery={handleRunQuery}
hideButtons={{ addQuery: true, prettify: false, autocomplete: false, traceQuery: true, anomalyConfig: true }}
hideButtons={{
addQuery: true,
prettify: false,
autocomplete: false,
traceQuery: true,
anomalyConfig: true,
reduceMemUsage: true,
}}
/>
{isLoading && <Spinner/>}
{(!hideError && error) && <Alert variant="error">{error}</Alert>}

View File

@ -54,7 +54,7 @@ const ExploreLogs: FC = () => {
fetchLogs(newPeriod).then((isSuccess) => {
isSuccess && !hideChart && fetchLogHits(newPeriod);
}).catch(e => e);
setSearchParamsFromKeys( {
setSearchParamsFromKeys({
query,
"g0.range_input": duration,
"g0.end_input": newPeriod.date,

View File

@ -1,22 +1,23 @@
import React, { FC, useState, useMemo, useRef } from "preact/compat";
import JsonView from "../../../components/Views/JsonView/JsonView";
import { CodeIcon, ListIcon, TableIcon } from "../../../components/Main/Icons";
import Tabs from "../../../components/Main/Tabs/Tabs";
import "./style.scss";
import classNames from "classnames";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import { Logs } from "../../../api/types";
import dayjs from "dayjs";
import { useTimeState } from "../../../state/time/TimeStateContext";
import useStateSearchParams from "../../../hooks/useStateSearchParams";
import useSearchParamsFromObject from "../../../hooks/useSearchParamsFromObject";
import TableSettings from "../../../components/Table/TableSettings/TableSettings";
import useBoolean from "../../../hooks/useBoolean";
import TableLogs from "./TableLogs";
import GroupLogs from "../GroupLogs/GroupLogs";
import { DATE_TIME_FORMAT } from "../../../constants/date";
import { marked } from "marked";
import JsonView from "../../../components/Views/JsonView/JsonView";
import LineLoader from "../../../components/Main/LineLoader/LineLoader";
import SelectLimit from "../../../components/Main/Pagination/SelectLimit/SelectLimit";
const MemoizedTableLogs = React.memo(TableLogs);
const MemoizedGroupLogs = React.memo(GroupLogs);
const MemoizedJsonView = React.memo(JsonView);
export interface ExploreLogBodyProps {
data: Logs[];
@ -37,38 +38,35 @@ const tabs = [
const ExploreLogsBody: FC<ExploreLogBodyProps> = ({ data, isLoading }) => {
const { isMobile } = useDeviceDetect();
const { timezone } = useTimeState();
const { setSearchParamsFromKeys } = useSearchParamsFromObject();
const groupSettingsRef = useRef<HTMLDivElement>(null);
const [activeTab, setActiveTab] = useStateSearchParams(DisplayType.group, "view");
const [displayColumns, setDisplayColumns] = useState<string[]>([]);
const [rowsPerPage, setRowsPerPage] = useStateSearchParams(1000, "rows_per_page");
const { value: tableCompact, toggle: toggleTableCompact } = useBoolean(false);
const logs = useMemo(() => data.map((item) => ({
...item,
_vmui_time: item._time ? dayjs(item._time).tz().format(`${DATE_TIME_FORMAT}.SSS`) : "",
_vmui_data: JSON.stringify(item, null, 2),
_vmui_markdown: item._msg ? marked(item._msg.replace(/```/g, "\n```\n")) as string : ""
})) as Logs[], [data, timezone]);
const columns = useMemo(() => {
if (!logs?.length) return [];
const hideColumns = ["_vmui_data", "_vmui_time", "_vmui_markdown"];
if (!data?.length) return [];
const keys = new Set<string>();
for (const item of logs) {
for (const item of data) {
for (const key in item) {
keys.add(key);
}
}
return Array.from(keys).filter((col) => !hideColumns.includes(col));
}, [logs]);
return Array.from(keys);
}, [data]);
const handleChangeTab = (view: string) => {
setActiveTab(view as DisplayType);
setSearchParamsFromKeys({ view });
};
const handleSetRowsPerPage = (limit: number) => {
setRowsPerPage(limit);
setSearchParamsFromKeys({ rows_per_page: limit });
};
return (
<div
className={classNames({
@ -97,6 +95,10 @@ const ExploreLogsBody: FC<ExploreLogBodyProps> = ({ data, isLoading }) => {
</div>
{activeTab === DisplayType.table && (
<div className="vm-explore-logs-body-header__settings">
<SelectLimit
limit={rowsPerPage}
onChange={handleSetRowsPerPage}
/>
<TableSettings
columns={columns}
selectedColumns={displayColumns}
@ -124,22 +126,22 @@ const ExploreLogsBody: FC<ExploreLogBodyProps> = ({ data, isLoading }) => {
{!!data.length && (
<>
{activeTab === DisplayType.table && (
<TableLogs
logs={logs}
<MemoizedTableLogs
logs={data}
displayColumns={displayColumns}
tableCompact={tableCompact}
columns={columns}
rowsPerPage={Number(rowsPerPage)}
/>
)}
{activeTab === DisplayType.group && (
<GroupLogs
logs={logs}
columns={columns}
<MemoizedGroupLogs
logs={data}
settingsRef={groupSettingsRef}
/>
)}
{activeTab === DisplayType.json && (
<JsonView data={data}/>
<MemoizedJsonView data={data}/>
)}
</>
)}

View File

@ -1,58 +1,94 @@
import React, { FC, useMemo } from "preact/compat";
import React, { FC, useMemo, useRef, useState } from "preact/compat";
import "./style.scss";
import Table from "../../../components/Table/Table";
import { Logs } from "../../../api/types";
import Pagination from "../../../components/Main/Pagination/Pagination";
import { useEffect } from "react";
interface TableLogsProps {
logs: Logs[];
displayColumns: string[];
tableCompact: boolean;
columns: string[];
rowsPerPage: number;
}
const TableLogs: FC<TableLogsProps> = ({ logs, displayColumns, tableCompact, columns }) => {
const getColumnClass = (key: string) => {
switch (key) {
case "_time":
return "vm-table-cell_logs-time";
case "_vmui_data":
return "vm-table-cell_logs vm-table-cell_pre";
default:
return "vm-table-cell_logs";
}
};
const getColumnClass = (key: string) => {
switch (key) {
case "_time":
return "vm-table-cell_logs-time";
default:
return "vm-table-cell_logs";
}
};
const compactColumns = [{
key: "_vmui_data",
title: "Data",
className: "vm-table-cell_logs vm-table-cell_pre"
}];
const TableLogs: FC<TableLogsProps> = ({ logs, displayColumns, tableCompact, columns, rowsPerPage }) => {
const containerRef = useRef<HTMLDivElement>(null);
const [page, setPage] = useState(1);
const rows = useMemo(() => {
return logs.map((log) => {
const _vmui_data = JSON.stringify(log, null, 2);
return { ...log, _vmui_data };
}) as Logs[];
}, [logs]);
const tableColumns = useMemo(() => {
if (tableCompact) {
return [{
key: "_vmui_data",
title: "Data",
className: getColumnClass("_vmui_data")
}];
}
return columns.map((key) => ({
key: key as keyof Logs,
title: key,
className: getColumnClass(key),
}));
}, [tableCompact, columns]);
}, [columns]);
const filteredColumns = useMemo(() => {
if (tableCompact) return tableColumns;
if (tableCompact) return compactColumns;
if (!displayColumns?.length) return [];
return tableColumns.filter(c => displayColumns.includes(c.key as string));
}, [tableColumns, displayColumns, tableCompact]);
const paginationOffset = useMemo(() => {
const startIndex = (page - 1) * rowsPerPage;
const endIndex = startIndex + rowsPerPage;
return { startIndex, endIndex };
}, [page, rowsPerPage]);
const handlePageChange = (newPage: number) => {
setPage(newPage);
if (containerRef.current) {
const y = containerRef.current.getBoundingClientRect().top + window.scrollY - 50;
window.scrollTo({ top: y });
}
};
useEffect(() => {
setPage(1);
}, [logs, rowsPerPage]);
return (
<>
<Table
rows={logs}
columns={filteredColumns}
defaultOrderBy={"_time"}
defaultOrderDir={"desc"}
copyToClipboard={"_vmui_data"}
paginationOffset={{ startIndex: 0, endIndex: Infinity }}
<div ref={containerRef}>
<Table
rows={rows}
columns={filteredColumns}
defaultOrderBy={"_time"}
defaultOrderDir={"desc"}
copyToClipboard={"_vmui_data"}
paginationOffset={paginationOffset}
/>
</div>
<Pagination
currentPage={page}
totalItems={rows.length}
itemsPerPage={rowsPerPage}
onPageChange={handlePageChange}
/>
</>
);
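
`paginationOffset` reduces to a pair of slice bounds derived from the 1-based page number. The same arithmetic in Go, with an explicit clamp since Go slicing (unlike `Array.prototype.slice`) panics on out-of-range bounds:

```go
package main

import "fmt"

// pageOffset computes the slice bounds for the given 1-based page,
// matching the paginationOffset memo in the component above.
func pageOffset(page, rowsPerPage, total int) (start, end int) {
	start = (page - 1) * rowsPerPage
	end = start + rowsPerPage
	if end > total {
		end = total // clamp; the TS version relies on slice() tolerating overshoot
	}
	return start, end
}

func main() {
	s, e := pageOffset(3, 1000, 2500)
	fmt.Println(s, e) // 2000 2500
}
```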

View File

@ -20,13 +20,12 @@ import { getStreamPairs } from "../../../utils/logs";
const WITHOUT_GROUPING = "No Grouping";
interface TableLogsProps {
interface Props {
logs: Logs[];
columns: string[];
settingsRef: React.RefObject<HTMLElement>;
}
const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
const GroupLogs: FC<Props> = ({ logs, settingsRef }) => {
const { isDarkTheme } = useAppState();
const copyToClipboard = useCopyToClipboard();
const [searchParams, setSearchParams] = useSearchParams();
@ -46,19 +45,21 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
const expandAll = useMemo(() => expandGroups.every(Boolean), [expandGroups]);
const logsKeys = useMemo(() => {
const excludeKeys = ["_msg", "_time", "_vmui_time", "_vmui_data", "_vmui_markdown"];
const excludeKeys = ["_msg", "_time"];
const uniqKeys = Array.from(new Set(logs.map(l => Object.keys(l)).flat()));
const keys = [WITHOUT_GROUPING, ...uniqKeys.filter(k => !excludeKeys.includes(k))];
return [WITHOUT_GROUPING, ...uniqKeys.filter(k => !excludeKeys.includes(k))];
}, [logs]);
if (!searchKey) return keys;
const filteredLogsKeys = useMemo(() => {
if (!searchKey) return logsKeys;
try {
const regexp = new RegExp(searchKey, "i");
const found = keys.filter((item) => regexp.test(item));
return found.sort((a,b) => (a.match(regexp)?.index || 0) - (b.match(regexp)?.index || 0));
return logsKeys.filter(item => regexp.test(item))
.sort((a, b) => (a.match(regexp)?.index || 0) - (b.match(regexp)?.index || 0));
} catch (e) {
return [];
}
}, [logs, searchKey]);
}, [logsKeys, searchKey]);
const groupData = useMemo(() => {
return groupByMultipleKeys(logs, [groupBy]).map((item) => {
@ -94,16 +95,15 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
const handleToggleExpandAll = useCallback(() => {
setExpandGroups(new Array(groupData.length).fill(!expandAll));
}, [expandAll]);
}, [expandAll, groupData.length]);
const handleChangeExpand = (i: number) => (value: boolean) => {
const handleChangeExpand = useCallback((i: number) => (value: boolean) => {
setExpandGroups((prev) => {
const newExpandGroups = [...prev];
newExpandGroups[i] = value;
return newExpandGroups;
});
};
}, []);
useEffect(() => {
if (copied === null) return;
@ -170,7 +170,7 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
<Tooltip title={expandAll ? "Collapse All" : "Expand All"}>
<Button
variant="text"
startIcon={expandAll ? <CollapseIcon/> : <ExpandIcon/> }
startIcon={expandAll ? <CollapseIcon/> : <ExpandIcon/>}
onClick={handleToggleExpandAll}
ariaLabel={expandAll ? "Collapse All" : "Expand All"}
/>
@ -179,7 +179,7 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
<div ref={optionsButtonRef}>
<Button
variant="text"
startIcon={<StorageIcon/> }
startIcon={<StorageIcon/>}
onClick={toggleOpenOptions}
ariaLabel={"Group by"}
/>
@ -201,7 +201,7 @@ const GroupLogs: FC<TableLogsProps> = ({ logs, settingsRef }) => {
type="search"
/>
</div>
{logsKeys.map(id => (
{filteredLogsKeys.map(id => (
<div
className={classNames({
"vm-list-item": true,

View File

@ -0,0 +1,54 @@
import React, { FC, memo, useCallback, useEffect, useState } from "preact/compat";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import Button from "../../../components/Main/Button/Button";
import { CopyIcon } from "../../../components/Main/Icons";
import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
interface Props {
field: string;
value: string;
}
const GroupLogsFieldRow: FC<Props> = ({ field, value }) => {
const copyToClipboard = useCopyToClipboard();
const [copied, setCopied] = useState<boolean>(false);
const handleCopy = useCallback(async () => {
if (copied) return;
try {
await copyToClipboard(`${field}: "${value}"`);
setCopied(true);
} catch (e) {
console.error(e);
}
}, [copied, copyToClipboard]);
useEffect(() => {
if (!copied) return;
const timeout = setTimeout(() => setCopied(false), 2000);
return () => clearTimeout(timeout);
}, [copied]);
return (
<tr className="vm-group-logs-row-fields-item">
<td className="vm-group-logs-row-fields-item-controls">
<div className="vm-group-logs-row-fields-item-controls__wrapper">
<Tooltip title={copied ? "Copied" : "Copy to clipboard"}>
<Button
variant="text"
color="gray"
size="small"
startIcon={<CopyIcon/>}
onClick={handleCopy}
ariaLabel="copy to clipboard"
/>
</Tooltip>
</div>
</td>
<td className="vm-group-logs-row-fields-item__key">{field}</td>
<td className="vm-group-logs-row-fields-item__value">{value}</td>
</tr>
);
};
export default memo(GroupLogsFieldRow);

View File

@ -1,13 +1,15 @@
import React, { FC, useEffect, useMemo, useState } from "preact/compat";
import React, { FC, memo, useMemo } from "preact/compat";
import { Logs } from "../../../api/types";
import "./style.scss";
import useBoolean from "../../../hooks/useBoolean";
import Button from "../../../components/Main/Button/Button";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import { ArrowDownIcon, CopyIcon } from "../../../components/Main/Icons";
import useCopyToClipboard from "../../../hooks/useCopyToClipboard";
import { ArrowDownIcon } from "../../../components/Main/Icons";
import classNames from "classnames";
import { useLogsState } from "../../../state/logsPanel/LogsStateContext";
import dayjs from "dayjs";
import { DATE_TIME_FORMAT } from "../../../constants/date";
import { useTimeState } from "../../../state/time/TimeStateContext";
import GroupLogsFieldRow from "./GroupLogsFieldRow";
import { marked } from "marked";
interface Props {
log: Logs;
@ -20,40 +22,31 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
} = useBoolean(false);
const { markdownParsing } = useLogsState();
const { timezone } = useTimeState();
const excludeKeys = ["_msg", "_vmui_time", "_vmui_data", "_vmui_markdown"];
const fields = Object.entries(log).filter(([key]) => !excludeKeys.includes(key));
const formattedTime = useMemo(() => {
if (!log._time) return "";
return dayjs(log._time).tz().format(`${DATE_TIME_FORMAT}.SSS`);
}, [log._time, timezone]);
const formattedMarkdown = useMemo(() => {
if (!markdownParsing || !log._msg) return "";
return marked(log._msg.replace(/```/g, "\n```\n")) as string;
}, [log._msg, markdownParsing]);
const fields = useMemo(() => Object.entries(log).filter(([key]) => key !== "_msg"), [log]);
const hasFields = fields.length > 0;
const displayMessage = useMemo(() => {
if (log._msg) return log._msg;
if (!hasFields) return;
const dataObject = fields.reduce<{[key: string]: string}>((obj, [key, value]) => {
const dataObject = fields.reduce<{ [key: string]: string }>((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
return JSON.stringify(dataObject);
}, [log, fields, hasFields]);
const copyToClipboard = useCopyToClipboard();
const [copied, setCopied] = useState<number | null>(null);
const createCopyHandler = (copyValue: string, rowIndex: number) => async () => {
if (copied === rowIndex) return;
try {
await copyToClipboard(copyValue);
setCopied(rowIndex);
} catch (e) {
console.error(e);
}
};
useEffect(() => {
if (copied === null) return;
const timeout = setTimeout(() => setCopied(null), 2000);
return () => clearTimeout(timeout);
}, [copied]);
return (
<div className="vm-group-logs-row">
<div
@ -74,10 +67,10 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
<div
className={classNames({
"vm-group-logs-row-content__time": true,
"vm-group-logs-row-content__time_missing": !log._vmui_time
"vm-group-logs-row-content__time_missing": !formattedTime
})}
>
{log._vmui_time || "timestamp missing"}
{formattedTime || "timestamp missing"}
</div>
<div
className={classNames({
@ -85,7 +78,7 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
"vm-group-logs-row-content__msg_empty-msg": !log._msg,
"vm-group-logs-row-content__msg_missing": !displayMessage
})}
dangerouslySetInnerHTML={markdownParsing && log._vmui_markdown ? { __html: log._vmui_markdown } : undefined}
dangerouslySetInnerHTML={(markdownParsing && formattedMarkdown) ? { __html: formattedMarkdown } : undefined}
>
{displayMessage || "-"}
</div>
@ -94,28 +87,12 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
<div className="vm-group-logs-row-fields">
<table>
<tbody>
{fields.map(([key, value], i) => (
<tr
{fields.map(([key, value]) => (
<GroupLogsFieldRow
key={key}
className="vm-group-logs-row-fields-item"
>
<td className="vm-group-logs-row-fields-item-controls">
<div className="vm-group-logs-row-fields-item-controls__wrapper">
<Tooltip title={copied === i ? "Copied" : "Copy to clipboard"}>
<Button
variant="text"
color="gray"
size="small"
startIcon={<CopyIcon/>}
onClick={createCopyHandler(`${key}: "${value}"`, i)}
ariaLabel="copy to clipboard"
/>
</Tooltip>
</div>
</td>
<td className="vm-group-logs-row-fields-item__key">{key}</td>
<td className="vm-group-logs-row-fields-item__value">{value}</td>
</tr>
field={key}
value={value}
/>
))}
</tbody>
</table>
@ -125,4 +102,4 @@ const GroupLogsItem: FC<Props> = ({ log }) => {
);
};
export default GroupLogsItem;
export default memo(GroupLogsItem);

View File

@ -9,7 +9,7 @@ export const useFetchLogs = (server: string, query: string, limit: number) => {
const [searchParams] = useSearchParams();
const [logs, setLogs] = useState<Logs[]>([]);
const [isLoading, setIsLoading] = useState<{[key: number]: boolean;}>([]);
const [isLoading, setIsLoading] = useState<{ [key: number]: boolean }>({});
const [error, setError] = useState<ErrorTypes | string>();
const abortControllerRef = useRef(new AbortController());
@ -33,8 +33,9 @@ export const useFetchLogs = (server: string, query: string, limit: number) => {
const parseLineToJSON = (line: string): Logs | null => {
try {
return JSON.parse(line);
return line && JSON.parse(line);
} catch (e) {
console.error(`Failed to parse "${line}" to JSON\n`, e);
return null;
}
};
@ -56,23 +57,25 @@ export const useFetchLogs = (server: string, query: string, limit: number) => {
if (!response.ok || !response.body) {
setError(text);
setLogs([]);
setIsLoading(prev => ({ ...prev, [id]: false }));
return false;
}
const lines = text.split("\n").filter(line => line).slice(0, limit);
const data = lines.map(parseLineToJSON).filter(line => line) as Logs[];
const data = text.split("\n", limit).map(parseLineToJSON).filter(line => line) as Logs[];
setLogs(data);
setIsLoading(prev => ({ ...prev, [id]: false }));
return true;
} catch (e) {
setIsLoading(prev => ({ ...prev, [id]: false }));
if (e instanceof Error && e.name !== "AbortError") {
setError(String(e));
console.error(e);
setLogs([]);
}
return false;
} finally {
setIsLoading(prev => {
// Remove the `id` key from `isLoading` when its value becomes `false`
const { [id]: _, ...rest } = prev;
return rest;
});
}
}, [url, query, limit, searchParams]);
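
The response body consumed by `parseLineToJSON` above is newline-delimited JSON: each line is an independent log entry, and blank or malformed lines are skipped rather than failing the whole request. A Go sketch of the same parsing (note `bufio.Scanner`'s default 64 KiB line limit, which very long log lines would exceed):

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

// parseNDJSON parses newline-delimited JSON up to limit entries,
// skipping blank and malformed lines the way parseLineToJSON does.
func parseNDJSON(body string, limit int) []map[string]any {
	var logs []map[string]any
	sc := bufio.NewScanner(strings.NewReader(body))
	for sc.Scan() && len(logs) < limit {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		var entry map[string]any
		if err := json.Unmarshal([]byte(line), &entry); err != nil {
			fmt.Println("skipping malformed line:", err)
			continue
		}
		logs = append(logs, entry)
	}
	return logs
}

func main() {
	body := "{\"_msg\":\"hello\"}\n\n{\"_msg\":\"world\"}\n"
	fmt.Println(len(parseNDJSON(body, 1000))) // 2
}
```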

View File

@ -0,0 +1,152 @@
import { Dispatch, SetStateAction, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
import { MetricBase, MetricResult, ExportMetricResult } from "../../../api/types";
import { ErrorTypes, SeriesLimits } from "../../../types";
import { useQueryState } from "../../../state/query/QueryStateContext";
import { useTimeState } from "../../../state/time/TimeStateContext";
import { useAppState } from "../../../state/common/StateContext";
import { useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
import { isValidHttpUrl } from "../../../utils/url";
import { getExportDataUrl } from "../../../api/query-range";
interface FetchQueryParams {
hideQuery?: number[];
showAllSeries?: boolean;
}
interface FetchQueryReturn {
fetchUrl?: string[],
isLoading: boolean,
data?: MetricResult[],
error?: ErrorTypes | string,
queryErrors: (ErrorTypes | string)[],
setQueryErrors: Dispatch<SetStateAction<string[]>>,
warning?: string,
abortFetch: () => void
}
const parseLineToJSON = (line: string): ExportMetricResult | null => {
try {
return JSON.parse(line);
} catch (e) {
return null;
}
};
export const useFetchExport = ({ hideQuery, showAllSeries }: FetchQueryParams): FetchQueryReturn => {
const { query } = useQueryState();
const { period } = useTimeState();
const { displayType, reduceMemUsage, seriesLimits: stateSeriesLimits } = useCustomPanelState();
const { serverUrl } = useAppState();
const [isLoading, setIsLoading] = useState(false);
const [data, setData] = useState<MetricResult[]>();
const [error, setError] = useState<ErrorTypes | string>();
const [queryErrors, setQueryErrors] = useState<string[]>([]);
const [warning, setWarning] = useState<string>();
const abortControllerRef = useRef(new AbortController());
const fetchUrl = useMemo(() => {
setError("");
setQueryErrors([]);
if (!period) return;
if (!serverUrl) {
setError(ErrorTypes.emptyServer);
} else if (query.every(q => !q.trim())) {
setQueryErrors(query.map(() => ErrorTypes.validQuery));
} else if (isValidHttpUrl(serverUrl)) {
const updatedPeriod = { ...period };
return query.map(q => getExportDataUrl(serverUrl, q, updatedPeriod, reduceMemUsage));
} else {
setError(ErrorTypes.validServer);
}
}, [serverUrl, period, hideQuery, reduceMemUsage]);
const fetchData = useCallback(async ( { fetchUrl, stateSeriesLimits, showAllSeries }: {
fetchUrl: string[];
stateSeriesLimits: SeriesLimits;
showAllSeries?: boolean;
}) => {
abortControllerRef.current.abort();
abortControllerRef.current = new AbortController();
const { signal } = abortControllerRef.current;
setIsLoading(true);
try {
const tempData: MetricBase[] = [];
const seriesLimit = showAllSeries ? Infinity : +stateSeriesLimits[displayType] || Infinity;
let counter = 1;
let totalLength = 0;
for await (const url of fetchUrl) {
const isHideQuery = hideQuery?.includes(counter - 1);
if (isHideQuery) {
setQueryErrors(prev => [...prev, ""]);
counter++;
continue;
}
const response = await fetch(url, { signal });
const text = await response.text();
if (!response.ok || !response.body) {
tempData.push({ metric: {}, values: [], group: counter } as MetricBase);
setError(text);
setQueryErrors(prev => [...prev, `${text}`]);
} else {
setQueryErrors(prev => [...prev, ""]);
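// Remaining series budget across all queries: collect only as many lines as still fit under the limit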
const freeTempSize = seriesLimit - tempData.length;
const lines = text.split("\n").filter(line => line);
const lineLimited = lines.slice(0, freeTempSize).sort();
lineLimited.forEach((line: string) => {
const jsonLine = parseLineToJSON(line);
if (!jsonLine) return;
tempData.push({
group: counter,
metric: jsonLine.metric,
values: jsonLine.values.map((value, index) => [(jsonLine.timestamps[index]/1000), value]),
} as MetricBase);
});
totalLength += lines.length;
}
counter++;
}
const limitText = `Showing ${tempData.length} series out of ${totalLength} series due to performance reasons. Please narrow down the query, so it returns fewer series`;
setWarning(totalLength > seriesLimit ? limitText : "");
setData(tempData as MetricResult[]);
setIsLoading(false);
} catch (e) {
setIsLoading(false);
if (e instanceof Error && e.name !== "AbortError") {
setError(String(e));
console.error(e);
}
}
}, [displayType, hideQuery]);
const abortFetch = useCallback(() => {
abortControllerRef.current.abort();
setData([]);
}, [abortControllerRef]);
useEffect(() => {
if (!fetchUrl?.length) return;
const timer = setTimeout(fetchData, 400, { fetchUrl, stateSeriesLimits, showAllSeries });
return () => {
abortControllerRef.current?.abort();
clearTimeout(timer);
};
}, [fetchUrl, stateSeriesLimits, showAllSeries]);
return {
fetchUrl,
isLoading,
data,
error,
queryErrors,
setQueryErrors,
warning,
abortFetch,
};
};

View File

@ -0,0 +1,162 @@
import React, { FC, useState } from "preact/compat";
import LineLoader from "../../components/Main/LineLoader/LineLoader";
import { useCustomPanelState } from "../../state/customPanel/CustomPanelStateContext";
import { useQueryState } from "../../state/query/QueryStateContext";
import "../CustomPanel/style.scss";
import Alert from "../../components/Main/Alert/Alert";
import classNames from "classnames";
import useDeviceDetect from "../../hooks/useDeviceDetect";
import { useRef } from "react";
import CustomPanelTabs from "../CustomPanel/CustomPanelTabs";
import { DisplayTypeSwitch } from "../CustomPanel/DisplayTypeSwitch";
import QueryConfigurator from "../CustomPanel/QueryConfigurator/QueryConfigurator";
import WarningLimitSeries from "../CustomPanel/WarningLimitSeries/WarningLimitSeries";
import { useFetchExport } from "./hooks/useFetchExport";
import { useSetQueryParams } from "../CustomPanel/hooks/useSetQueryParams";
import { DisplayType } from "../../types";
import Hyperlink from "../../components/Main/Hyperlink/Hyperlink";
import { CloseIcon } from "../../components/Main/Icons";
import Button from "../../components/Main/Button/Button";
const RawSamplesLink = () => (
<Hyperlink
href="https://docs.victoriametrics.com/keyconcepts/#raw-samples"
underlined
>
raw samples
</Hyperlink>
);
const QueryDataLink = () => (
<Hyperlink
underlined
href="https://docs.victoriametrics.com/keyconcepts/#query-data"
>
Query API
</Hyperlink>
);
const TimeSeriesSelectorLink = () => (
<Hyperlink
underlined
href="https://docs.victoriametrics.com/keyconcepts/#filtering"
>
time series selector
</Hyperlink>
);
const RawQueryPage: FC = () => {
useSetQueryParams();
const { isMobile } = useDeviceDetect();
const { displayType } = useCustomPanelState();
const { query } = useQueryState();
const [hideQuery, setHideQuery] = useState<number[]>([]);
const [hideError, setHideError] = useState(!query[0]);
const [showAllSeries, setShowAllSeries] = useState(false);
const [showPageDescription, setShowPageDescription] = useState(true);
const {
data,
error,
isLoading,
warning,
queryErrors,
setQueryErrors,
abortFetch,
} = useFetchExport({ hideQuery, showAllSeries });
const controlsRef = useRef<HTMLDivElement>(null);
const showError = !hideError && error;
const handleHideQuery = (queries: number[]) => {
setHideQuery(queries);
};
const handleRunQuery = () => {
setHideError(false);
};
const handleHidePageDescription = () => {
setShowPageDescription(false);
};
return (
<div
className={classNames({
"vm-custom-panel": true,
"vm-custom-panel_mobile": isMobile,
})}
>
<QueryConfigurator
label={"Time series selector"}
queryErrors={!hideError ? queryErrors : []}
setQueryErrors={setQueryErrors}
setHideError={setHideError}
stats={[]}
isLoading={isLoading}
onHideQuery={handleHideQuery}
onRunQuery={handleRunQuery}
abortFetch={abortFetch}
hideButtons={{ traceQuery: true, disableCache: true }}
includeFunctions={false}
/>
{showPageDescription && (
<Alert variant="info">
<div className="vm-explore-metrics-header-description">
<p>
This page provides a dedicated view for querying and displaying <RawSamplesLink/> from VictoriaMetrics.
It expects only <TimeSeriesSelectorLink/> as a query argument.
Users often assume that the <QueryDataLink/> returns data exactly as stored,
but data samples and timestamps may be modified by the API.
</p>
<Button
variant="text"
size="small"
startIcon={<CloseIcon/>}
onClick={handleHidePageDescription}
ariaLabel="close tips"
/>
</div>
</Alert>
)}
{showError && <Alert variant="error">{error}</Alert>}
{warning && (
<WarningLimitSeries
warning={warning}
query={query}
onChange={setShowAllSeries}
/>
)}
<div
className={classNames({
"vm-custom-panel-body": true,
"vm-custom-panel-body_mobile": isMobile,
"vm-block": true,
"vm-block_mobile": isMobile,
})}
>
{isLoading && <LineLoader/>}
<div
className="vm-custom-panel-body-header"
ref={controlsRef}
>
<div className="vm-custom-panel-body-header__tabs">
<DisplayTypeSwitch tabFilter={(tab) => (tab.value !== DisplayType.table)}/>
</div>
</div>
<CustomPanelTabs
graphData={data}
liveData={data}
isHistogram={false}
displayType={displayType}
controlsRef={controlsRef}
/>
</div>
</div>
);
};
export default RawQueryPage;

View File

@ -15,6 +15,7 @@ const router = {
icons: "/icons",
anomaly: "/anomaly",
query: "/query",
rawQuery: "/raw-query",
downsamplingDebug: "/downsampling-filters-debug",
retentionDebug: "/retention-filters-debug",
};
@ -45,11 +46,15 @@ const routerOptionsDefault = {
}
};
export const routerOptions: {[key: string]: RouterOptions} = {
export const routerOptions: { [key: string]: RouterOptions } = {
[router.home]: {
title: "Query",
...routerOptionsDefault
},
[router.rawQuery]: {
title: "Raw query",
...routerOptionsDefault
},
[router.metrics]: {
title: "Explore Prometheus metrics",
header: {

View File

@ -68,6 +68,7 @@ export const getDefaultNavigation = ({
showAlertLink,
}: NavigationConfig): NavigationItem[] => [
{ value: router.home },
{ value: router.rawQuery },
{ label: "Explore", submenu: getExploreNav() },
{ label: "Tools", submenu: getToolsNav(isEnterpriseLicense) },
{ value: router.dashboards, hide: !showPredefinedDashboards },

View File

@ -10,6 +10,7 @@ export interface CustomPanelState {
isTracingEnabled: boolean;
seriesLimits: SeriesLimits
tableCompact: boolean;
reduceMemUsage: boolean;
}
export type CustomPanelAction =
@ -18,6 +19,7 @@ export type CustomPanelAction =
| { type: "TOGGLE_NO_CACHE"}
| { type: "TOGGLE_QUERY_TRACING" }
| { type: "TOGGLE_TABLE_COMPACT" }
| { type: "TOGGLE_REDUCE_MEM_USAGE"}
export const getInitialDisplayType = () => {
const queryTab = getQueryStringValue("g0.tab", 0) as string;
@ -33,6 +35,7 @@ export const initialCustomPanelState: CustomPanelState = {
isTracingEnabled: false,
seriesLimits: limitsStorage ? JSON.parse(limitsStorage) : DEFAULT_MAX_SERIES,
tableCompact: getFromStorage("TABLE_COMPACT") as boolean || false,
reduceMemUsage: false
};
export function reducer(state: CustomPanelState, action: CustomPanelAction): CustomPanelState {
@ -65,6 +68,12 @@ export function reducer(state: CustomPanelState, action: CustomPanelAction): Cus
...state,
tableCompact: !state.tableCompact
};
case "TOGGLE_REDUCE_MEM_USAGE":
saveToStorage("TABLE_COMPACT", !state.reduceMemUsage);
return {
...state,
reduceMemUsage: !state.reduceMemUsage
};
default:
throw new Error();
}

View File

@ -23,6 +23,7 @@ export interface SeriesItem extends Series {
median: number;
forecast?: ForecastType | null;
forecastGroup?: string;
hasAlias?: boolean;
}
export interface HideSeriesArgs {
@ -45,6 +46,7 @@ export interface LegendItemType {
freeFormFields: {[key: string]: string};
statsFormatted: SeriesItemStatsFormatted;
median: number
hasAlias: boolean;
}
export interface BarSeriesItem {

View File

@ -2,16 +2,23 @@ import { MetricBase } from "../api/types";
export const getNameForMetric = (result: MetricBase, alias?: string, showQueryNum = true): string => {
const { __name__, ...freeFormFields } = result.metric;
const name = alias || `${showQueryNum ? `[Query ${result.group}] ` : ""}${__name__ || ""}`;
if (Object.keys(freeFormFields).length == 0) {
if (!name) {
return "value";
}
return name;
const queryPrefix = showQueryNum ? `[Query ${result.group}] ` : "";
if (alias) {
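// Expand {{label}} placeholders in the alias with the corresponding label values, e.g. an alias "{{job}} rate" becomes "node rate" for a series with job="node"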
return alias.replace(/\{\{(\w+)}}/g, (_, key) => result.metric[key] || "");
}
return `${name}{${Object.entries(freeFormFields).map(e =>
`${e[0]}=${JSON.stringify(e[1])}`
).join(", ")}}`;
const name = `${queryPrefix}${__name__ || ""}`;
if (Object.keys(freeFormFields).length === 0) {
return name || "value";
}
const fieldsString = Object.entries(freeFormFields)
.map(([key, value]) => `${key}=${JSON.stringify(value)}`)
.join(", ");
return `${name}{${fieldsString}}`;
};
export const promValueToNumber = (s: string): number => {

View File

@ -1,5 +1,6 @@
import uPlot from "uplot";
import { MetricResult } from "../../api/types";
import { SeriesItem } from "../../types";
export const formatTicks = (u: uPlot, ticks: number[], unit = ""): string[] => {
const min = ticks[0];
@ -53,7 +54,11 @@ export const getDashLine = (group: number): number[] => {
return group <= 1 ? [] : [group*4, group*1.2];
};
export const getMetricName = (metricItem: MetricResult) => {
export const getMetricName = (metricItem: MetricResult, seriesItem: SeriesItem) => {
if (seriesItem?.hasAlias && seriesItem?.label) {
return seriesItem.label;
}
const metric = metricItem?.metric || {};
const labelNames = Object.keys(metric).filter(x => x != "__name__");
const labels = labelNames.map(key => `${key}=${JSON.stringify(metric[key])}`);

View File

@ -42,10 +42,12 @@ export const getSeriesItemContext = (data: MetricResult[], hideSeries: string[],
return (d: MetricResult, i: number): SeriesItem => {
const metricInfo = isAnomalyUI ? isForecast(data[i].metric) : null;
const label = isAnomalyUI ? metricInfo?.group || "" : getNameForMetric(d, alias[d.group - 1]);
const aliasValue = alias[d.group - 1];
const label = isAnomalyUI ? metricInfo?.group || "" : getNameForMetric(d, aliasValue);
return {
label,
hasAlias: Boolean(aliasValue),
dash: getDashSeries(metricInfo),
width: getWidthSeries(metricInfo),
stroke: getStrokeSeries({ metricInfo, label, isAnomalyUI, colorState }),
@ -88,6 +90,7 @@ export const getLegendItem = (s: SeriesItem, group: number): LegendItemType => (
freeFormFields: s.freeFormFields,
statsFormatted: s.statsFormatted,
median: s.median,
hasAlias: s.hasAlias || false,
});
export const getHideSeries = ({ hideSeries, legend, metaKey, series, isAnomalyView }: HideSeriesArgs): string[] => {
@ -185,7 +188,27 @@ const getPointsSeries = (metricInfo: ForecastMetricInfo | null): uPlotSeries.Poi
if (isAnomalyMetric) {
return { size: 8, width: 4, space: 0 };
}
return { size: 4.2, width: 1.4 };
return {
size: 4,
width: 0,
show: true,
filter: filterPoints,
};
};
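// filterPoints returns the indices of isolated samples (whose neighbors are
// both null), so uPlot still draws them as visible dots; samples inside
// continuous line segments are skipped.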
const filterPoints = (self: uPlot, seriesIdx: number): number[] | null => {
const data = self.data[seriesIdx];
const indices = [];
for (let i = 0; i < data.length; i++) {
const prev = data[i - 1];
const next = data[i + 1];
if (prev === null && next === null) {
indices.push(i);
}
}
return indices;
};
type GetStrokeSeriesArgs = {

View File

@ -1,6 +1,7 @@
package apptest
import (
"bytes"
"io"
"net/http"
"net/url"
@ -36,13 +37,13 @@ func (c *Client) CloseConnections() {
// the response body to the caller.
func (c *Client) Get(t *testing.T, url string, wantStatusCode int) string {
t.Helper()
return c.do(t, http.MethodGet, url, "", "", wantStatusCode)
return c.do(t, http.MethodGet, url, "", nil, wantStatusCode)
}
// Post sends a HTTP POST request. Once the function receives a response, it
// checks whether the response status code matches the expected one and returns
// the response body to the caller.
func (c *Client) Post(t *testing.T, url, contentType, data string, wantStatusCode int) string {
func (c *Client) Post(t *testing.T, url, contentType string, data []byte, wantStatusCode int) string {
t.Helper()
return c.do(t, http.MethodPost, url, contentType, data, wantStatusCode)
}
@ -52,16 +53,16 @@ func (c *Client) Post(t *testing.T, url, contentType, data string, wantStatusCod
// matches the expected one and returns the response body to the caller.
func (c *Client) PostForm(t *testing.T, url string, data url.Values, wantStatusCode int) string {
t.Helper()
return c.Post(t, url, "application/x-www-form-urlencoded", data.Encode(), wantStatusCode)
return c.Post(t, url, "application/x-www-form-urlencoded", []byte(data.Encode()), wantStatusCode)
}
// do prepares a HTTP request, sends it to the server, receives the response
// from the server, ensures the response code matches the expected one, reads
// the entire response body and returns it to the caller.
func (c *Client) do(t *testing.T, method, url, contentType, data string, wantStatusCode int) string {
func (c *Client) do(t *testing.T, method, url, contentType string, data []byte, wantStatusCode int) string {
t.Helper()
req, err := http.NewRequest(method, url, strings.NewReader(data))
req, err := http.NewRequest(method, url, bytes.NewReader(data))
if err != nil {
t.Fatalf("could not create a HTTP request: %v", err)
}
@ -128,3 +129,31 @@ func (app *ServesMetrics) GetMetric(t *testing.T, metricName string) float64 {
t.Fatalf("metic not found: %s", metricName)
return 0
}
// GetMetricsByPrefix retrieves the values of all metrics that start with the
// given prefix.
func (app *ServesMetrics) GetMetricsByPrefix(t *testing.T, prefix string) []float64 {
t.Helper()
values := []float64{}
metrics := app.cli.Get(t, app.metricsURL, http.StatusOK)
for _, metric := range strings.Split(metrics, "\n") {
if !strings.HasPrefix(metric, prefix) {
continue
}
parts := strings.Split(metric, " ")
if len(parts) < 2 {
t.Fatalf("unexpected record format: got %q, want metric name and value separated by a space", metric)
}
value, err := strconv.ParseFloat(parts[len(parts)-1], 64)
if err != nil {
t.Fatalf("could not parse metric value %s: %v", metric, err)
}
values = append(values, value)
}
return values
}

View File

@ -3,27 +3,83 @@ package apptest
import (
"encoding/json"
"fmt"
"net/url"
"slices"
"strconv"
"strings"
"testing"
"time"
pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
// PrometheusQuerier contains methods available to Prometheus-like HTTP API for Querying
type PrometheusQuerier interface {
PrometheusAPIV1Query(t *testing.T, query, time, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1QueryRange(t *testing.T, query, start, end, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1Export(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1Query(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1QueryRange(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse
PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse
}
// PrometheusWriter contains methods available to Prometheus-like HTTP API for Writing new data
type PrometheusWriter interface {
PrometheusAPIV1Write(t *testing.T, records []pb.TimeSeries, opts QueryOpts)
PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts)
}
// StorageFlusher defines a method that forces the flushing of data inserted
// into the storage, so it becomes available for searching immediately.
type StorageFlusher interface {
ForceFlush(t *testing.T)
}
// PrometheusWriteQuerier encompasses the methods for writing, flushing and
// querying the data.
type PrometheusWriteQuerier interface {
PrometheusWriter
PrometheusQuerier
StorageFlusher
}
// QueryOpts contains various params used for querying or ingesting data
type QueryOpts struct {
Tenant string
Timeout string
Tenant string
Timeout string
Start string
End string
Time string
Step string
ExtraFilters []string
ExtraLabels []string
}
func (qos *QueryOpts) asURLValues() url.Values {
uv := make(url.Values)
addNonEmpty := func(name string, values ...string) {
for _, value := range values {
if len(value) == 0 {
continue
}
uv.Add(name, value)
}
}
addNonEmpty("start", qos.Start)
addNonEmpty("end", qos.End)
addNonEmpty("time", qos.Time)
addNonEmpty("step", qos.Step)
addNonEmpty("timeout", qos.Timeout)
addNonEmpty("extra_label", qos.ExtraLabels...)
addNonEmpty("extra_filters", qos.ExtraFilters...)
return uv
}
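For illustration, a minimal standalone sketch of how these options flatten into an encoded query string; the queryOpts type below is a trimmed-down stand-in for apptest.QueryOpts, not the real type:

package main

import (
	"fmt"
	"net/url"
)

// queryOpts mirrors a small subset of apptest.QueryOpts for this example only.
type queryOpts struct {
	Time         string
	Step         string
	ExtraFilters []string
}

// asURLValues flattens the non-empty fields into url.Values, as above.
func (qos *queryOpts) asURLValues() url.Values {
	uv := make(url.Values)
	addNonEmpty := func(name string, values ...string) {
		for _, value := range values {
			if value != "" {
				uv.Add(name, value)
			}
		}
	}
	addNonEmpty("time", qos.Time)
	addNonEmpty("step", qos.Step)
	addNonEmpty("extra_filters", qos.ExtraFilters...)
	return uv
}

func main() {
	opts := queryOpts{Time: "2022-05-10T08:03:00.000Z", Step: "5m"}
	fmt.Println(opts.asURLValues().Encode())
	// Prints: step=5m&time=2022-05-10T08%3A03%3A00.000Z
}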
// getTenant returns the tenant ID, falling back to the default "0" when unset
func (qos *QueryOpts) getTenant() string {
if qos.Tenant == "" {
return "0"
}
return qos.Tenant
}
// PrometheusAPIV1QueryResponse is an in-memory representation of the
@ -40,7 +96,7 @@ func NewPrometheusAPIV1QueryResponse(t *testing.T, s string) *PrometheusAPIV1Que
res := &PrometheusAPIV1QueryResponse{}
if err := json.Unmarshal([]byte(s), res); err != nil {
t.Fatalf("could not unmarshal query response: %v", err)
t.Fatalf("could not unmarshal query response data=\n%s\n: %v", string(s), err)
}
return res
}
@ -81,7 +137,7 @@ func NewSample(t *testing.T, timeStr string, value float64) *Sample {
// UnmarshalJSON populates the sample fields from a JSON string.
func (s *Sample) UnmarshalJSON(b []byte) error {
var (
ts int64
ts float64
v string
)
raw := []any{&ts, &v}
@ -91,7 +147,7 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
if got, want := len(raw), 2; got != want {
return fmt.Errorf("unexpected number of fields: got %d, want %d (raw sample: %s)", got, want, string(b))
}
s.Timestamp = ts
s.Timestamp = int64(ts)
var err error
s.Value, err = strconv.ParseFloat(v, 64)
if err != nil {
@ -115,7 +171,23 @@ func NewPrometheusAPIV1SeriesResponse(t *testing.T, s string) *PrometheusAPIV1Se
res := &PrometheusAPIV1SeriesResponse{}
if err := json.Unmarshal([]byte(s), res); err != nil {
t.Fatalf("could not unmarshal series response: %v", err)
t.Fatalf("could not unmarshal series response data:\n%s\n err: %v", string(s), err)
}
return res
}
// Sort sorts the response data.
func (r *PrometheusAPIV1SeriesResponse) Sort() {
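// Build a deterministic key from each label map so the sort order is stable.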
str := func(m map[string]string) string {
s := []string{}
for k, v := range m {
s = append(s, k+v)
}
slices.Sort(s)
return strings.Join(s, "")
}
slices.SortFunc(r.Data, func(a, b map[string]string) int {
return strings.Compare(str(a), str(b))
})
}

View File

@ -1,9 +1,12 @@
package apptest
import (
"fmt"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/google/go-cmp/cmp"
)
// TestCase holds the state and defines clean-up procedure common for all test
@ -51,6 +54,17 @@ func (tc *TestCase) Stop() {
}
}
// MustStartDefaultVmsingle is a test helper function that starts an instance of
// vmsingle with defaults suitable for most tests.
func (tc *TestCase) MustStartDefaultVmsingle() *Vmsingle {
tc.t.Helper()
return tc.MustStartVmsingle("vmsingle", []string{
"-storageDataPath=" + tc.Dir() + "/vmsingle",
"-retentionPeriod=100y",
})
}
// MustStartVmsingle is a test helper function that starts an instance of
// vmsingle and fails the test if the app fails to start.
func (tc *TestCase) MustStartVmsingle(instance string, flags []string) *Vmsingle {
@ -103,6 +117,125 @@ func (tc *TestCase) MustStartVminsert(instance string, flags []string) *Vminsert
return app
}
type vmcluster struct {
*Vminsert
*Vmselect
vmstorages []*Vmstorage
}
func (c *vmcluster) ForceFlush(t *testing.T) {
for _, s := range c.vmstorages {
s.ForceFlush(t)
}
}
// MustStartDefaultCluster starts a typical cluster configuration suitable for
// most tests.
//
// The cluster consists of two vmstorages, one vminsert and one vmselect, no
// data replication.
//
// Such a configuration is suitable for tests that don't verify the
// cluster-specific behavior (such as sharding, replication, or multilevel
// vmselect) but instead just need a typical cluster configuration to verify
// some business logic (such as API surface, or MetricsQL). Such cluster
// tests usually come paired with corresponding vmsingle tests.
func (tc *TestCase) MustStartDefaultCluster() PrometheusWriteQuerier {
tc.t.Helper()
vmstorage1 := tc.MustStartVmstorage("vmstorage-1", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-1",
"-retentionPeriod=100y",
})
vmstorage2 := tc.MustStartVmstorage("vmstorage-2", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-2",
"-retentionPeriod=100y",
})
vminsert := tc.MustStartVminsert("vminsert", []string{
"-storageNode=" + vmstorage1.VminsertAddr() + "," + vmstorage2.VminsertAddr(),
})
vmselect := tc.MustStartVmselect("vmselect", []string{
"-storageNode=" + vmstorage1.VmselectAddr() + "," + vmstorage2.VmselectAddr(),
})
return &vmcluster{vminsert, vmselect, []*Vmstorage{vmstorage1, vmstorage2}}
}
func (tc *TestCase) addApp(app Stopper) {
tc.startedApps = append(tc.startedApps, app)
}
// AssertOptions holds the assertion params, such as the got and wanted values,
// as well as the message that should be included in the assertion error
// message in case of failure.
//
// In VictoriaMetrics (especially the cluster version) the inserted data does
// not become visible for querying right away. Therefore, the first comparisons
// may fail. AssertOptions allows configuring how many times the actual result
// must be retrieved and compared with the expected one, and how long to wait
// between the retries. If these two params (`Retries` and `Period`) are not
// set, the default values will be used.
//
// If it is known that the data is available, then the retry functionality can
// be disabled by setting the `DoNotRetry` field.
//
// AssertOptions are used by the TestCase.Assert() method, and this method uses
// cmp.Diff() from go-cmp package for comparing got and wanted values.
// AssertOptions, therefore, allows passing cmp.Options to cmp.Diff() via the
// `CmpOpts` field.
//
// Finally, the `FailNow` field controls whether the assertion should fail using
// `testing.T.Errorf()` or `testing.T.Fatalf()`.
type AssertOptions struct {
Msg string
Got func() any
Want any
CmpOpts []cmp.Option
DoNotRetry bool
Retries int
Period time.Duration
FailNow bool
}
// Assert compares the actual result with the expected one possibly multiple
// times in order to account for the fact that the inserted data does not become
// available for querying right away (especially in cluster version of
// VictoriaMetrics).
func (tc *TestCase) Assert(opts *AssertOptions) {
tc.t.Helper()
const (
defaultRetries = 20
defaultPeriod = 100 * time.Millisecond
)
if opts.DoNotRetry {
opts.Retries = 1
opts.Period = 0
} else {
if opts.Retries <= 0 {
opts.Retries = defaultRetries
}
if opts.Period <= 0 {
opts.Period = defaultPeriod
}
}
var diff string
for range opts.Retries {
diff = cmp.Diff(opts.Want, opts.Got(), opts.CmpOpts...)
if diff == "" {
return
}
time.Sleep(opts.Period)
}
msg := fmt.Sprintf("%s (-want, +got):\n%s", opts.Msg, diff)
if opts.FailNow {
tc.t.Fatal(msg)
} else {
tc.t.Error(msg)
}
}

View File

@ -2,7 +2,6 @@ package tests
import (
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/google/go-cmp/cmp"
@ -29,78 +28,48 @@ var docData = []string{
}
// TestSingleKeyConceptsQuery verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
// for vm-single.
func TestSingleKeyConceptsQuery(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
vmsingle := tc.MustStartVmsingle("vmsingle", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage",
"-retentionPeriod=100y",
})
sut := tc.MustStartDefaultVmsingle()
opts := apptest.QueryOpts{Timeout: "5s"}
// Insert example data from documentation.
vmsingle.PrometheusAPIV1ImportPrometheus(t, docData, opts)
vmsingle.ForceFlush(t)
testInstantQuery(t, vmsingle, opts)
testRangeQuery(t, vmsingle, opts)
testRangeQueryIsEquivalentToManyInstantQueries(t, vmsingle, opts)
testKeyConceptsQueryData(t, sut)
}
// TestClusterKeyConceptsQuery verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
func TestClusterKeyConceptsQuery(t *testing.T) {
// TestClusterKeyConceptsQueryData verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
// for vm-cluster.
func TestClusterKeyConceptsQueryData(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
// Set up the following cluster configuration:
//
// - two vmstorage instances
// - vminsert points to the two vmstorages, its replication setting
// is off which means it will only shard the incoming data across the two
// vmstorages.
// - vmselect points to the two vmstorages and is expected to query both
// vmstorages and build the full result out of the two partial results.
sut := tc.MustStartDefaultCluster()
vmstorage1 := tc.MustStartVmstorage("vmstorage-1", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-1",
"-retentionPeriod=100y",
})
vmstorage2 := tc.MustStartVmstorage("vmstorage-2", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage-2",
"-retentionPeriod=100y",
})
vminsert := tc.MustStartVminsert("vminsert", []string{
"-storageNode=" + vmstorage1.VminsertAddr() + "," + vmstorage2.VminsertAddr(),
})
vmselect := tc.MustStartVmselect("vmselect", []string{
"-storageNode=" + vmstorage1.VmselectAddr() + "," + vmstorage2.VmselectAddr(),
})
testKeyConceptsQueryData(t, sut)
}
opts := apptest.QueryOpts{Timeout: "5s", Tenant: "0"}
// testClusterKeyConceptsQuery verifies cases from https://docs.victoriametrics.com/keyconcepts/#query-data
func testKeyConceptsQueryData(t *testing.T, sut apptest.PrometheusWriteQuerier) {
// Insert example data from documentation.
vminsert.PrometheusAPIV1ImportPrometheus(t, docData, opts)
time.Sleep(2 * time.Second)
sut.PrometheusAPIV1ImportPrometheus(t, docData, apptest.QueryOpts{})
sut.ForceFlush(t)
vmstorage1.ForceFlush(t)
vmstorage2.ForceFlush(t)
testInstantQuery(t, vmselect, opts)
testRangeQuery(t, vmselect, opts)
testRangeQueryIsEquivalentToManyInstantQueries(t, vmselect, opts)
testInstantQuery(t, sut)
testRangeQuery(t, sut)
testRangeQueryIsEquivalentToManyInstantQueries(t, sut)
}
// testInstantQuery verifies the statements made in the `Instant query` section
// of the VictoriaMetrics documentation. See:
// https://docs.victoriametrics.com/keyconcepts/#instant-query
func testInstantQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.QueryOpts) {
func testInstantQuery(t *testing.T, q apptest.PrometheusQuerier) {
// Get the value of the foo_bar time series at 2022-05-10T08:03:00Z with the
// step of 5m and timeout 5s. There is no sample at exactly this timestamp.
// Therefore, VictoriaMetrics will search for the nearest sample within the
// [time-5m..time] interval.
got := q.PrometheusAPIV1Query(t, "foo_bar", "2022-05-10T08:03:00.000Z", "5m", opts)
got := q.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{Time: "2022-05-10T08:03:00.000Z", Step: "5m"})
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[{"metric":{"__name__":"foo_bar"},"value":[1652169780,"3"]}]}}`)
opt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
if diff := cmp.Diff(want, got, opt); diff != "" {
@ -112,7 +81,7 @@ func testInstantQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.Qu
// Therefore, VictoriaMetrics will search for the nearest sample within the
// [time-1m..time] interval. Since the nearest sample is 2m away and the
// step is 1m, then the VictoriaMetrics must return empty response.
got = q.PrometheusAPIV1Query(t, "foo_bar", "2022-05-10T08:18:00.000Z", "1m", opts)
got = q.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{Time: "2022-05-10T08:18:00.000Z", Step: "1m"})
if len(got.Data.Result) > 0 {
t.Errorf("unexpected response: got non-empty result, want empty result:\n%v", got)
}
@ -121,11 +90,11 @@ func testInstantQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.Qu
// testRangeQuery verifies the statements made in the `Range query` section of
// the VictoriaMetrics documentation. See:
// https://docs.victoriametrics.com/keyconcepts/#range-query
func testRangeQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.QueryOpts) {
func testRangeQuery(t *testing.T, q apptest.PrometheusQuerier) {
f := func(start, end, step string, wantSamples []*apptest.Sample) {
t.Helper()
got := q.PrometheusAPIV1QueryRange(t, "foo_bar", start, end, step, opts)
got := q.PrometheusAPIV1QueryRange(t, "foo_bar", apptest.QueryOpts{Start: start, End: end, Step: step})
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data": {"result": [{"metric": {"__name__": "foo_bar"}, "values": []}]}}`)
want.Data.Result[0].Samples = wantSamples
opt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
@ -192,11 +161,11 @@ func testRangeQuery(t *testing.T, q apptest.PrometheusQuerier, opts apptest.Quer
// query is actually an instant query executed 1 + (start-end)/step times on the
// time range from start to end. See:
// https://docs.victoriametrics.com/keyconcepts/#range-query
func testRangeQueryIsEquivalentToManyInstantQueries(t *testing.T, q apptest.PrometheusQuerier, opts apptest.QueryOpts) {
func testRangeQueryIsEquivalentToManyInstantQueries(t *testing.T, q apptest.PrometheusQuerier) {
f := func(timestamp string, want *apptest.Sample) {
t.Helper()
gotInstant := q.PrometheusAPIV1Query(t, "foo_bar", timestamp, "1m", opts)
gotInstant := q.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{Time: timestamp, Step: "1m"})
if want == nil {
if got, want := len(gotInstant.Data.Result), 0; got != want {
t.Errorf("unexpected instant result size: got %d, want %d", got, want)
@ -209,7 +178,11 @@ func testRangeQueryIsEquivalentToManyInstantQueries(t *testing.T, q apptest.Prom
}
}
rangeRes := q.PrometheusAPIV1QueryRange(t, "foo_bar", "2022-05-10T07:59:00.000Z", "2022-05-10T08:17:00.000Z", "1m", opts)
rangeRes := q.PrometheusAPIV1QueryRange(t, "foo_bar", apptest.QueryOpts{
Start: "2022-05-10T07:59:00.000Z",
End: "2022-05-10T08:17:00.000Z",
Step: "1m",
})
rangeSamples := rangeRes.Data.Result[0].Samples
f("2022-05-10T07:59:00.000Z", nil)

View File

@ -0,0 +1,130 @@
package tests
import (
"fmt"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
)
func millis(s string) int64 {
t, err := time.Parse(time.RFC3339, s)
if err != nil {
panic(fmt.Sprintf("could not parse time %q: %v", s, err))
}
return t.UnixMilli()
}
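// staleNaNsData is a single series with one regular sample followed by a
// Prometheus staleness marker (decimal.StaleNaN).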
var staleNaNsData = func() []pb.TimeSeries {
return []pb.TimeSeries{
{
Labels: []pb.Label{
{
Name: "__name__",
Value: "metric",
},
},
Samples: []pb.Sample{
{
Value: 1,
Timestamp: millis("2024-01-01T00:01:00Z"),
},
{
Value: decimal.StaleNaN,
Timestamp: millis("2024-01-01T00:02:00Z"),
},
},
},
}
}()
func TestSingleInstantQueryDoesNotReturnStaleNaNs(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
sut := tc.MustStartDefaultVmsingle()
testInstantQueryDoesNotReturnStaleNaNs(t, sut)
}
func TestClusterInstantQueryDoesNotReturnStaleNaNs(t *testing.T) {
tc := apptest.NewTestCase(t)
defer tc.Stop()
sut := tc.MustStartDefaultCluster()
testInstantQueryDoesNotReturnStaleNaNs(t, sut)
}
func testInstantQueryDoesNotReturnStaleNaNs(t *testing.T, sut apptest.PrometheusWriteQuerier) {
sut.PrometheusAPIV1Write(t, staleNaNsData, apptest.QueryOpts{})
sut.ForceFlush(t)
var got, want *apptest.PrometheusAPIV1QueryResponse
cmpOptions := []cmp.Option{
cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType"),
cmpopts.EquateNaNs(),
}
// Verify that instant query returns the first point.
got = sut.PrometheusAPIV1Query(t, "metric", apptest.QueryOpts{
Step: "5m",
Time: "2024-01-01T00:01:00.000Z",
})
want = apptest.NewPrometheusAPIV1QueryResponse(t, `{"data": {"result": [{"metric": {"__name__": "metric"}}]}}`)
want.Data.Result[0].Sample = apptest.NewSample(t, "2024-01-01T00:01:00Z", 1)
if diff := cmp.Diff(want, got, cmpOptions...); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Verify that instant query does not return stale NaN.
got = sut.PrometheusAPIV1Query(t, "metric", apptest.QueryOpts{
Step: "5m",
Time: "2024-01-01T00:02:00.000Z",
})
want = apptest.NewPrometheusAPIV1QueryResponse(t, `{"data": {"result": []}}`)
// Empty response, stale NaN is not included into response
if diff := cmp.Diff(want, got, cmpOptions...); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Verify that instant query with default rollup function returns stale NaN
// while it must not.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5806
got = sut.PrometheusAPIV1Query(t, "metric[2m]", apptest.QueryOpts{
Step: "5m",
Time: "2024-01-01T00:02:00.000Z",
})
want = apptest.NewPrometheusAPIV1QueryResponse(t, `{"data": {"result": [{"metric": {"__name__": "metric"}, "values": []}]}}`)
s := make([]*apptest.Sample, 2)
s[0] = apptest.NewSample(t, "2024-01-01T00:01:00Z", 1)
s[1] = apptest.NewSample(t, "2024-01-01T00:02:00Z", decimal.StaleNaN)
want.Data.Result[0].Samples = s
if diff := cmp.Diff(want, got, cmpOptions...); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// Verify that exported data contains stale NaN.
got = sut.PrometheusAPIV1Export(t, `{__name__="metric"}`, apptest.QueryOpts{
Start: "2024-01-01T00:01:00.000Z",
End: "2024-01-01T00:02:00.000Z",
})
want = apptest.NewPrometheusAPIV1QueryResponse(t, `{"data": {"result": [{"metric": {"__name__": "metric"}, "values": []}]}}`)
s = make([]*apptest.Sample, 2)
s[0] = apptest.NewSample(t, "2024-01-01T00:01:00Z", 1)
s[1] = apptest.NewSample(t, "2024-01-01T00:02:00Z", decimal.StaleNaN)
want.Data.Result[0].Samples = s
if diff := cmp.Diff(want, got, cmpOptions...); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}

View File

@ -4,7 +4,6 @@ import (
"fmt"
"math/rand/v2"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
)
@ -33,30 +32,41 @@ func TestClusterMultilevelSelect(t *testing.T) {
"-storageNode=" + vmselectL1.ClusternativeListenAddr(),
})
// Insert 1000 unique time series.Wait for 2 seconds to let vmstorage
// flush pending items so they become searchable.
// Insert 1000 unique time series.
const numMetrics = 1000
records := make([]string, numMetrics)
want := &apptest.PrometheusAPIV1SeriesResponse{
Status: "success",
IsPartial: false,
Data: make([]map[string]string, numMetrics),
}
for i := range numMetrics {
records[i] = fmt.Sprintf("metric_%d %d", i, rand.IntN(1000))
name := fmt.Sprintf("metric_%d", i)
records[i] = fmt.Sprintf("%s %d", name, rand.IntN(1000))
want.Data[i] = map[string]string{"__name__": name}
}
vminsert.PrometheusAPIV1ImportPrometheus(t, records, apptest.QueryOpts{Tenant: "0"})
time.Sleep(2 * time.Second)
want.Sort()
qopts := apptest.QueryOpts{Tenant: "0"}
vminsert.PrometheusAPIV1ImportPrometheus(t, records, qopts)
vmstorage.ForceFlush(t)
// Retrieve all time series and verify that vmselect (L1) serves the complete
// set of time series.
// Retrieve all time series and verify that both vmselect (L1) and
// vmselect (L2) serve the complete set of time series.
seriesL1 := vmselectL1.PrometheusAPIV1Series(t, `{__name__=~".*"}`, apptest.QueryOpts{Tenant: "0"})
if got, want := len(seriesL1.Data), numMetrics; got != want {
t.Fatalf("unexpected level-1 series count: got %d, want %d", got, want)
}
// Retrieve all time series and verify that vmselect (L2) serves the complete
// set of time series.
seriesL2 := vmselectL2.PrometheusAPIV1Series(t, `{__name__=~".*"}`, apptest.QueryOpts{Tenant: "0"})
if got, want := len(seriesL2.Data), numMetrics; got != want {
t.Fatalf("unexpected level-2 series count: got %d, want %d", got, want)
got := func(app *apptest.Vmselect) any {
res := app.PrometheusAPIV1Series(t, `{__name__=~".*"}`, qopts)
res.Sort()
return res
}
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected level-1 series count",
Got: func() any { return got(vmselectL1) },
Want: want,
})
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected level-2 series count",
Got: func() any { return got(vmselectL2) },
Want: want,
})
}

View File

@ -0,0 +1,174 @@
package tests
import (
"os"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
)
func TestClusterMultiTenantSelect(t *testing.T) {
os.RemoveAll(t.Name())
cmpOpt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1QueryResponse{}, "Status", "Data.ResultType")
cmpSROpt := cmpopts.IgnoreFields(apptest.PrometheusAPIV1SeriesResponse{}, "Status", "IsPartial")
tc := apptest.NewTestCase(t)
defer tc.Stop()
vmstorage := tc.MustStartVmstorage("vmstorage", []string{
"-storageDataPath=" + tc.Dir() + "/vmstorage",
"-retentionPeriod=100y",
})
vminsert := tc.MustStartVminsert("vminsert", []string{
"-storageNode=" + vmstorage.VminsertAddr(),
})
vmselect := tc.MustStartVmselect("vmselect", []string{
"-storageNode=" + vmstorage.VmselectAddr(),
"-search.tenantCacheExpireDuration=0",
})
var commonSamples = []string{
`foo_bar 1.00 1652169600000`, // 2022-05-10T08:00:00Z
`foo_bar 2.00 1652169660000`, // 2022-05-10T08:01:00Z
`foo_bar 3.00 1652169720000`, // 2022-05-10T08:02:00Z
}
// test for empty tenants request
got := vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
Tenant: "multitenant",
Step: "5m",
Time: "2022-05-10T08:03:00.000Z",
})
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[]}}`)
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// ingest per tenant data and verify it with search
tenantIDs := []string{"1:1", "1:15"}
instantCT := "2022-05-10T08:05:00.000Z"
for _, tenantID := range tenantIDs {
vminsert.PrometheusAPIV1ImportPrometheus(t, commonSamples, apptest.QueryOpts{Tenant: tenantID})
vmstorage.ForceFlush(t)
got := vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
Tenant: tenantID, Time: instantCT,
})
want := apptest.NewPrometheusAPIV1QueryResponse(t, `{"data":{"result":[{"metric":{"__name__":"foo_bar"},"value":[1652169900,"3"]}]}}`)
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}
// verify that all tenants are searchable via the multitenant APIs
// /api/v1/query
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result":[
{"metric":{"__name__":"foo_bar","vm_account_id":"1","vm_project_id": "1"},"value":[1652169900,"3"]},
{"metric":{"__name__":"foo_bar","vm_account_id":"1","vm_project_id":"15"},"value":[1652169900,"3"]}
]
}
}`,
)
got = vmselect.PrometheusAPIV1Query(t, "foo_bar", apptest.QueryOpts{
Tenant: "multitenant",
Time: instantCT,
})
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// /api/v1/query_range aggregated by tenant labels
query := "sum(foo_bar) by(vm_account_id,vm_project_id)"
got = vmselect.PrometheusAPIV1QueryRange(t, query, apptest.QueryOpts{
Tenant: "multitenant",
Start: "2022-05-10T07:59:00.000Z",
End: "2022-05-10T08:05:00.000Z",
Step: "1m",
})
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result": [
{"metric": {"vm_account_id": "1","vm_project_id":"1"}, "values": [[1652169600,"1"],[1652169660,"2"],[1652169720,"3"],[1652169780,"3"]]},
{"metric": {"vm_account_id": "1","vm_project_id":"15"}, "values": [[1652169600,"1"],[1652169660,"2"],[1652169720,"3"],[1652169780,"3"]]}
]
}
}`)
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// verify /api/v1/series response
wantSR := apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"1"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"15"}
]
}`)
wantSR.Sort()
gotSR := vmselect.PrometheusAPIV1Series(t, "foo_bar", apptest.QueryOpts{
Tenant: "multitenant",
Start: "2022-05-10T08:03:00.000Z",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// test multitenant ingest path, tenants must be populated from labels
//
var tenantLabelsSamples = []string{
`foo_bar{vm_account_id="5"} 1.00 1652169600000`, // 2022-05-10T08:00:00Z'
`foo_bar{vm_project_id="10"} 2.00 1652169660000`, // 2022-05-10T08:01:00Z
`foo_bar{vm_account_id="5",vm_project_id="15"} 3.00 1652169720000`, // 2022-05-10T08:02:00Z
}
vminsert.PrometheusAPIV1ImportPrometheus(t, tenantLabelsSamples, apptest.QueryOpts{Tenant: "multitenant"})
vmstorage.ForceFlush(t)
// /api/v1/query with query filters
want = apptest.NewPrometheusAPIV1QueryResponse(t,
`{"data":
{"result":[
{"metric":{"__name__":"foo_bar","vm_account_id":"5","vm_project_id": "0"},"value":[1652169900,"1"]},
{"metric":{"__name__":"foo_bar","vm_account_id":"5","vm_project_id":"15"},"value":[1652169900,"3"]}
]
}
}`,
)
got = vmselect.PrometheusAPIV1Query(t, `foo_bar{vm_account_id="5"}`, apptest.QueryOpts{
Time: instantCT,
Tenant: "multitenant",
})
if diff := cmp.Diff(want, got, cmpOpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
// /api/v1/series with extra_filters
wantSR = apptest.NewPrometheusAPIV1SeriesResponse(t,
`{"data": [
{"__name__":"foo_bar", "vm_account_id":"5", "vm_project_id":"15"},
{"__name__":"foo_bar", "vm_account_id":"1", "vm_project_id":"15"}
]
}`)
wantSR.Sort()
gotSR = vmselect.PrometheusAPIV1Series(t, "foo_bar", apptest.QueryOpts{
Start: "2022-05-10T08:00:00.000Z",
End: "2022-05-10T08:30:00.000Z",
ExtraFilters: []string{`{vm_project_id="15"}`},
Tenant: "multitenant",
})
gotSR.Sort()
if diff := cmp.Diff(wantSR, gotSR, cmpSROpt); diff != "" {
t.Errorf("unexpected response (-want, +got):\n%s", diff)
}
}

View File

@ -4,7 +4,6 @@ import (
"fmt"
"math/rand/v2"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/apptest"
)
@ -35,20 +34,28 @@ func TestClusterVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.
"-storageNode=" + vmstorage1.VmselectAddr() + "," + vmstorage2.VmselectAddr(),
})
// Insert 1000 unique time series and verify the that inserted data has been
// indeed sharded by checking various metrics exposed by vminsert and
// vmstorage.
// Also wait for 2 seconds to let vminsert and vmstorage servers to update
// the values of the metrics they expose and to let vmstorages flush pending
// items so they become searchable.
// Insert 1000 unique time series.
const numMetrics = 1000
records := make([]string, numMetrics)
for i := range numMetrics {
records[i] = fmt.Sprintf("metric_%d %d", i, rand.IntN(1000))
want := &apptest.PrometheusAPIV1SeriesResponse{
Status: "success",
IsPartial: false,
Data: make([]map[string]string, numMetrics),
}
vminsert.PrometheusAPIV1ImportPrometheus(t, records, apptest.QueryOpts{Tenant: "0"})
time.Sleep(2 * time.Second)
for i := range numMetrics {
name := fmt.Sprintf("metric_%d", i)
records[i] = fmt.Sprintf("%s %d", name, rand.IntN(1000))
want.Data[i] = map[string]string{"__name__": name}
}
want.Sort()
qopts := apptest.QueryOpts{Tenant: "0"}
vminsert.PrometheusAPIV1ImportPrometheus(t, records, qopts)
vmstorage1.ForceFlush(t)
vmstorage2.ForceFlush(t)
// Verify that the inserted data has indeed been sharded by checking metrics
// exposed by vmstorage.
numMetrics1 := vmstorage1.GetIntMetric(t, "vm_vminsert_metrics_read_total")
if numMetrics1 == 0 {
@ -63,16 +70,15 @@ func TestClusterVminsertShardsDataVmselectBuildsFullResultFromShards(t *testing.
}
// Retrieve all time series and verify that vmselect serves the complete set
//of time series.
// of time series.
series := vmselect.PrometheusAPIV1Series(t, `{__name__=~".*"}`, apptest.QueryOpts{Tenant: "0"})
if got, want := series.Status, "success"; got != want {
t.Fatalf("unexpected /ap1/v1/series response status: got %s, want %s", got, want)
}
if got, want := series.IsPartial, false; got != want {
t.Fatalf("unexpected /ap1/v1/series response isPartial value: got %t, want %t", got, want)
}
if got, want := len(series.Data), numMetrics; got != want {
t.Fatalf("unexpected /ap1/v1/series response series count: got %d, want %d", got, want)
}
tc.Assert(&apptest.AssertOptions{
Msg: "unexpected /api/v1/series response",
Got: func() any {
res := vmselect.PrometheusAPIV1Series(t, `{__name__=~".*"}`, qopts)
res.Sort()
return res
},
Want: want,
})
}

View File

@ -6,6 +6,10 @@ import (
"regexp"
"strings"
"testing"
"time"
pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/golang/snappy"
)
// Vminsert holds the state of a vminsert app and provides vminsert-specific
@ -45,6 +49,20 @@ func StartVminsert(instance string, flags []string, cli *Client) (*Vminsert, err
}, nil
}
// PrometheusAPIV1Write is a test helper function that inserts a
// collection of records in Prometheus remote-write format by sending a HTTP
// POST request to /prometheus/api/v1/write vminsert endpoint.
func (app *Vminsert) PrometheusAPIV1Write(t *testing.T, records []pb.TimeSeries, opts QueryOpts) {
t.Helper()
url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/write", app.httpListenAddr, opts.getTenant())
wr := pb.WriteRequest{Timeseries: records}
data := snappy.Encode(nil, wr.MarshalProtobuf(nil))
app.sendBlocking(t, len(records), func() {
app.cli.Post(t, url, "application/x-protobuf", data, http.StatusNoContent)
})
}
// PrometheusAPIV1ImportPrometheus is a test helper function that inserts a
// collection of records in Prometheus text exposition format for the given
// tenant by sending a HTTP POST request to
@ -54,11 +72,62 @@ func StartVminsert(instance string, flags []string, cli *Client) (*Vminsert, err
func (app *Vminsert) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, opts QueryOpts) {
t.Helper()
url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/prometheus", app.httpListenAddr, opts.Tenant)
app.cli.Post(t, url, "text/plain", strings.Join(records, "\n"), http.StatusNoContent)
url := fmt.Sprintf("http://%s/insert/%s/prometheus/api/v1/import/prometheus", app.httpListenAddr, opts.getTenant())
uv := opts.asURLValues()
uvs := uv.Encode()
if len(uvs) > 0 {
url += "?" + uvs
}
data := []byte(strings.Join(records, "\n"))
app.sendBlocking(t, len(records), func() {
app.cli.Post(t, url, "text/plain", data, http.StatusNoContent)
})
}
// String returns the string representation of the vminsert app state.
func (app *Vminsert) String() string {
return fmt.Sprintf("{app: %s httpListenAddr: %q}", app.app, app.httpListenAddr)
}
// sendBlocking sends the data to vmstorage by executing the `send` function and
// waits until the data is actually sent.
//
// vminsert does not send the data immediately. It first puts the data into a
// buffer. Then a background goroutine takes the data from the buffer and sends
// it to vmstorage. This happens every 200ms.
//
// Waiting is implemented by retrieving the value of the `vm_rpc_rows_sent_total`
// metric and checking whether it is equal to or greater than the wanted value.
// If it is, then the data has been sent to vmstorage.
//
// This is unreliable if the records are inserted concurrently.
// TODO(rtm0): Put sending and waiting into a critical section to make this reliable?
func (app *Vminsert) sendBlocking(t *testing.T, numRecordsToSend int, send func()) {
t.Helper()
send()
const (
retries = 20
period = 100 * time.Millisecond
)
wantRowsSentCount := app.rpcRowsSentTotal(t) + numRecordsToSend
for range retries {
if app.rpcRowsSentTotal(t) >= wantRowsSentCount {
return
}
time.Sleep(period)
}
t.Fatalf("timed out while waiting for inserted rows to be sent to vmstorage")
}
// rpcRowsSentTotal retrieves the values of all vminsert
// `vm_rpc_rows_sent_total` metrics (there will be one for each vmstorage) and
// returns their integer sum.
func (app *Vminsert) rpcRowsSentTotal(t *testing.T) int {
total := 0.0
for _, v := range app.GetMetricsByPrefix(t, "vm_rpc_rows_sent_total") {
total += v
}
return int(total)
}
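The wait-for-counter pattern in isolation, as a minimal sketch: readCounter is an assumed callback returning the current metric value, not part of the apptest package:

// waitForCounter polls readCounter until it reaches want, sleeping for period
// between attempts, and reports an error once the retries are exhausted.
func waitForCounter(readCounter func() int, want, retries int, period time.Duration) error {
	for range retries {
		if readCounter() >= want {
			return nil
		}
		time.Sleep(period)
	}
	return fmt.Errorf("counter did not reach %d after %d attempts", want, retries)
}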

View File

@ -3,7 +3,6 @@ package apptest
import (
"fmt"
"net/http"
"net/url"
"regexp"
"testing"
)
@ -55,39 +54,50 @@ func (app *Vmselect) ClusternativeListenAddr() string {
return app.clusternativeListenAddr
}
// PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
// instant query by sending a HTTP POST request to /prometheus/api/v1/query
// vmsingle endpoint.
// PrometheusAPIV1Export is a test helper function that performs the export of
// raw samples in JSON line format by sending a HTTP POST request to
// /prometheus/api/v1/export vmselect endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query
func (app *Vmselect) PrometheusAPIV1Query(t *testing.T, query, time, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
// See https://docs.victoriametrics.com/url-examples/#apiv1export
func (app *Vmselect) PrometheusAPIV1Export(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query", app.httpListenAddr, opts.Tenant)
values := url.Values{}
exportURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/export", app.httpListenAddr, opts.getTenant())
values := opts.asURLValues()
values.Add("match[]", query)
values.Add("format", "promapi")
res := app.cli.PostForm(t, exportURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
// instant query by sending a HTTP POST request to /prometheus/api/v1/query
// vmselect endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query
func (app *Vmselect) PrometheusAPIV1Query(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query", app.httpListenAddr, opts.getTenant())
values := opts.asURLValues()
values.Add("query", query)
values.Add("time", time)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, queryURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1QueryRange is a test helper function that performs
// PromQL/MetricsQL range query by sending a HTTP POST request to
// /prometheus/api/v1/query_range vmsingle endpoint.
// /prometheus/api/v1/query_range vmselect endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query_range
func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query, start, end, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query_range", app.httpListenAddr, opts.Tenant)
values := url.Values{}
queryURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/query_range", app.httpListenAddr, opts.getTenant())
values := opts.asURLValues()
values.Add("query", query)
values.Add("start", start)
values.Add("end", end)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, queryURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@ -99,9 +109,10 @@ func (app *Vmselect) PrometheusAPIV1QueryRange(t *testing.T, query, start, end,
func (app *Vmselect) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse {
t.Helper()
seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series", app.httpListenAddr, opts.Tenant)
values := url.Values{}
seriesURL := fmt.Sprintf("http://%s/select/%s/prometheus/api/v1/series", app.httpListenAddr, opts.getTenant())
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res := app.cli.PostForm(t, seriesURL, values, http.StatusOK)
return NewPrometheusAPIV1SeriesResponse(t, res)
}

View File

@ -3,12 +3,14 @@ package apptest
import (
"fmt"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"testing"
"time"
pb "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/golang/snappy"
)
// Vmsingle holds the state of a vmsingle app and provides vmsingle-specific
@ -20,11 +22,18 @@ type Vmsingle struct {
storageDataPath string
httpListenAddr string
forceFlushURL string
// vmstorage URLs.
forceFlushURL string
// vminsert URLs.
prometheusAPIV1ImportPrometheusURL string
prometheusAPIV1QueryURL string
prometheusAPIV1QueryRangeURL string
prometheusAPIV1SeriesURL string
prometheusAPIV1WriteURL string
// vmselect URLs.
prometheusAPIV1ExportURL string
prometheusAPIV1QueryURL string
prometheusAPIV1QueryRangeURL string
prometheusAPIV1SeriesURL string
}
// StartVmsingle starts an instance of vmsingle with the given flags. It also
@ -56,6 +65,8 @@ func StartVmsingle(instance string, flags []string, cli *Client) (*Vmsingle, err
forceFlushURL: fmt.Sprintf("http://%s/internal/force_flush", stderrExtracts[1]),
prometheusAPIV1ImportPrometheusURL: fmt.Sprintf("http://%s/prometheus/api/v1/import/prometheus", stderrExtracts[1]),
prometheusAPIV1WriteURL: fmt.Sprintf("http://%s/prometheus/api/v1/write", stderrExtracts[1]),
prometheusAPIV1ExportURL: fmt.Sprintf("http://%s/prometheus/api/v1/export", stderrExtracts[1]),
prometheusAPIV1QueryURL: fmt.Sprintf("http://%s/prometheus/api/v1/query", stderrExtracts[1]),
prometheusAPIV1QueryRangeURL: fmt.Sprintf("http://%s/prometheus/api/v1/query_range", stderrExtracts[1]),
prometheusAPIV1SeriesURL: fmt.Sprintf("http://%s/prometheus/api/v1/series", stderrExtracts[1]),
@ -70,6 +81,17 @@ func (app *Vmsingle) ForceFlush(t *testing.T) {
app.cli.Get(t, app.forceFlushURL, http.StatusOK)
}
// PrometheusAPIV1Write is a test helper function that inserts a
// collection of records in Prometheus remote-write format by sending a HTTP
// POST request to /prometheus/api/v1/write vmsingle endpoint.
func (app *Vmsingle) PrometheusAPIV1Write(t *testing.T, records []pb.TimeSeries, _ QueryOpts) {
t.Helper()
wr := pb.WriteRequest{Timeseries: records}
data := snappy.Encode(nil, wr.MarshalProtobuf(nil))
app.cli.Post(t, app.prometheusAPIV1WriteURL, "application/x-protobuf", data, http.StatusNoContent)
}
// PrometheusAPIV1ImportPrometheus is a test helper function that inserts a
// collection of records in Prometheus text exposition format by sending a HTTP
// POST request to /prometheus/api/v1/import/prometheus vmsingle endpoint.
@ -78,7 +100,23 @@ func (app *Vmsingle) ForceFlush(t *testing.T) {
func (app *Vmsingle) PrometheusAPIV1ImportPrometheus(t *testing.T, records []string, _ QueryOpts) {
t.Helper()
app.cli.Post(t, app.prometheusAPIV1ImportPrometheusURL, "text/plain", strings.Join(records, "\n"), http.StatusNoContent)
data := []byte(strings.Join(records, "\n"))
app.cli.Post(t, app.prometheusAPIV1ImportPrometheusURL, "text/plain", data, http.StatusNoContent)
}
// PrometheusAPIV1Export is a test helper function that performs the export of
// raw samples in JSON line format by sending a HTTP POST request to
// /prometheus/api/v1/export vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1export
func (app *Vmsingle) PrometheusAPIV1Export(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
values := opts.asURLValues()
values.Add("match[]", query)
values.Add("format", "promapi")
res := app.cli.PostForm(t, app.prometheusAPIV1ExportURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
// PrometheusAPIV1Query is a test helper function that performs PromQL/MetricsQL
@ -86,14 +124,11 @@ func (app *Vmsingle) PrometheusAPIV1ImportPrometheus(t *testing.T, records []str
// vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query
func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query, time, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
values := url.Values{}
values := opts.asURLValues()
values.Add("query", query)
values.Add("time", time)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, app.prometheusAPIV1QueryURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
@ -103,15 +138,12 @@ func (app *Vmsingle) PrometheusAPIV1Query(t *testing.T, query, time, step string
// /prometheus/api/v1/query_range vmsingle endpoint.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1query_range
func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query, start, end, step string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query string, opts QueryOpts) *PrometheusAPIV1QueryResponse {
t.Helper()
values := url.Values{}
values := opts.asURLValues()
values.Add("query", query)
values.Add("start", start)
values.Add("end", end)
values.Add("step", step)
values.Add("timeout", opts.Timeout)
res := app.cli.PostForm(t, app.prometheusAPIV1QueryRangeURL, values, http.StatusOK)
return NewPrometheusAPIV1QueryResponse(t, res)
}
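
The refactor above routes `time`, `start`, `end`, `step`, and `timeout` through `opts.asURLValues()`, whose definition is not shown in this diff. A plausible sketch of such a helper, with field names assumed rather than taken from the source:

```go
// Hypothetical sketch of QueryOpts and its asURLValues helper
// (uses "net/url"); field names are guesses. Only non-empty values
// become query-string parameters, so callers set just the parameters
// relevant to the endpoint they hit.
type QueryOpts struct {
	Time    string
	Start   string
	End     string
	Step    string
	Timeout string
}

func (qos *QueryOpts) asURLValues() url.Values {
	uv := make(url.Values)
	add := func(name, value string) {
		if value != "" {
			uv.Add(name, value)
		}
	}
	add("time", qos.Time)
	add("start", qos.Start)
	add("end", qos.End)
	add("step", qos.Step)
	add("timeout", qos.Timeout)
	return uv
}
```

This is why the query helpers no longer need separate positional arguments for each parameter.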
@ -120,11 +152,12 @@ func (app *Vmsingle) PrometheusAPIV1QueryRange(t *testing.T, query, start, end,
// and returns the list of time series that match the query.
//
// See https://docs.victoriametrics.com/url-examples/#apiv1series
func (app *Vmsingle) PrometheusAPIV1Series(t *testing.T, matchQuery string, _ QueryOpts) *PrometheusAPIV1SeriesResponse {
func (app *Vmsingle) PrometheusAPIV1Series(t *testing.T, matchQuery string, opts QueryOpts) *PrometheusAPIV1SeriesResponse {
t.Helper()
values := url.Values{}
values := opts.asURLValues()
values.Add("match[]", matchQuery)
res := app.cli.PostForm(t, app.prometheusAPIV1SeriesURL, values, http.StatusOK)
return NewPrometheusAPIV1SeriesResponse(t, res)
}

View File

@ -4798,7 +4798,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 30
"y": 6
},
"id": 73,
"options": {
@ -4915,7 +4915,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 22
"y": 6
},
"id": 131,
"options": {
@ -5019,7 +5019,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 30
"y": 14
},
"id": 130,
"options": {
@ -5136,7 +5136,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 30
"y": 14
},
"id": 77,
"options": {

View File

@ -4797,7 +4797,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 30
"y": 6
},
"id": 73,
"options": {
@ -4914,7 +4914,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 22
"y": 6
},
"id": 131,
"options": {
@ -5018,7 +5018,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 30
"y": 14
},
"id": 130,
"options": {
@ -5135,7 +5135,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 30
"y": 14
},
"id": 77,
"options": {

View File

@ -167,6 +167,8 @@ The list of alerting rules is the following:
alerting rules related to the [vmauth](https://docs.victoriametrics.com/vmauth/) component;
* [alerts-vlogs.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-vlogs.yml):
alerting rules related to [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/);
* [alerts-vmanomaly.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/rules/alerts-vmanomaly.yml):
alerting rules related to [VictoriaMetrics Anomaly Detection](https://docs.victoriametrics.com/anomaly-detection/);
Please also see [how to monitor](https://docs.victoriametrics.com/single-server-victoriametrics/#monitoring)
VictoriaMetrics installations.
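
For reference, mounting the new `alerts-vmanomaly.yml` into the dockerized vmalert follows the same pattern as the other rule files; a hypothetical compose fragment (file layout assumed, not taken from this diff):

```yaml
# Hypothetical fragment: mount the new rules file next to the existing
# ones and let the --rule glob pick it up.
services:
  vmalert:
    volumes:
      - ./rules/alerts-vmanomaly.yml:/etc/alerts/alerts-vmanomaly.yml
    command:
      - "--rule=/etc/alerts/*.yml"
```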
@ -180,8 +182,8 @@ make docker-victorialogs-up
VictoriaLogs will be accessible on the `--httpListenAddr=:9428` port.
In addition to the VictoriaLogs server, the docker compose environment contains the following components:
* [fluentbit](https://docs.fluentbit.io/manual) service for collecting docker logs and sending them to VictoriaLogs;
* VictoriaMetrics single server to collect metrics from `VictoriaLogs` and `fluentbit`;
* [vector](https://vector.dev/guides/) service for collecting docker logs and sending them to VictoriaLogs;
* VictoriaMetrics single server to collect metrics from `VictoriaLogs` and `vector`;
* [grafana](#grafana) is configured with [VictoriaLogs datasource](https://github.com/VictoriaMetrics/victorialogs-datasource).
To access Grafana, use the link [http://localhost:3000](http://localhost:3000).
@ -198,9 +200,13 @@ make docker-victorialogs-down
```
Please see more examples on integration of VictoriaLogs with other log shippers below:
* [filebeat-docker](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/filebeat-docker)
* [filebeat-syslog](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/filebeat-syslog)
* [fluentbit-docker](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/fluentbit-docker)
* [filebeat](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/filebeat)
* [fluentbit](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/fluentbit)
* [logstash](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/logstash)
* [promtail](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/promtail)
* [vector-docker](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/vector-docker)
* [vector](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/vector)
* [datadog-agent](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/datadog-agent)
* [journald](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/journald)
* [opentelemetry-collector](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/opentelemetry-collector)
* [telegraf](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/telegraf)
* [fluentd](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker/victorialogs/fluentd)

View File

@ -4,7 +4,7 @@ services:
# And forward them to --remoteWrite.url
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.106.0
image: victoriametrics/vmagent:v1.106.1
depends_on:
- "vminsert"
ports:
@ -39,7 +39,7 @@ services:
# where N is number of vmstorages (2 in this case).
vmstorage-1:
container_name: vmstorage-1
image: victoriametrics/vmstorage:v1.106.0-cluster
image: victoriametrics/vmstorage:v1.106.1-cluster
ports:
- 8482
- 8400
@ -51,7 +51,7 @@ services:
restart: always
vmstorage-2:
container_name: vmstorage-2
image: victoriametrics/vmstorage:v1.106.0-cluster
image: victoriametrics/vmstorage:v1.106.1-cluster
ports:
- 8482
- 8400
@ -66,7 +66,7 @@ services:
# pre-process them and distributes across configured vmstorage shards.
vminsert:
container_name: vminsert
image: victoriametrics/vminsert:v1.106.0-cluster
image: victoriametrics/vminsert:v1.106.1-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -81,7 +81,7 @@ services:
# vmselect collects results from configured `--storageNode` shards.
vmselect-1:
container_name: vmselect-1
image: victoriametrics/vmselect:v1.106.0-cluster
image: victoriametrics/vmselect:v1.106.1-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -94,7 +94,7 @@ services:
restart: always
vmselect-2:
container_name: vmselect-2
image: victoriametrics/vmselect:v1.106.0-cluster
image: victoriametrics/vmselect:v1.106.1-cluster
depends_on:
- "vmstorage-1"
- "vmstorage-2"
@ -112,7 +112,7 @@ services:
# It can be used as an authentication proxy.
vmauth:
container_name: vmauth
image: victoriametrics/vmauth:v1.106.0
image: victoriametrics/vmauth:v1.106.1
depends_on:
- "vmselect-1"
- "vmselect-2"
@ -127,7 +127,7 @@ services:
# vmalert executes alerting and recording rules
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.106.0
image: victoriametrics/vmalert:v1.106.1
depends_on:
- "vmauth"
ports:

View File

@ -16,23 +16,28 @@ services:
- ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json
- ./../../dashboards/victorialogs.json:/var/lib/grafana/dashboards/vl.json
environment:
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.6.2/victorialogs-datasource-v0.6.2.zip;victorialogs-datasource"
- "GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.8.0/victorialogs-datasource-v0.8.0.zip;victorialogs-datasource"
- "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victorialogs-datasource"
networks:
- vm_net
restart: always
# fluentbit is logs collector. It collects logs according to fluent-bit.conf
# vector is a logs collector. It collects logs according to vector.yaml
# and forwards them to VictoriaLogs
fluentbit:
container_name: fluentbit
image: fluent/fluent-bit:2.1.4
vector:
image: docker.io/timberio/vector:0.42.X-distroless-libc
volumes:
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- ./fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
- type: bind
source: /var/run/docker.sock
target: /var/run/docker.sock
- type: bind
source: /var/lib/docker
target: /var/lib/docker
- ./vector.yaml:/etc/vector/vector.yaml:ro
depends_on: [victorialogs]
ports:
- "5140:5140"
- "8686:8686"
user: root
networks:
- vm_net
@ -55,7 +60,7 @@ services:
# scraping, storing metrics and serve read requests.
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.106.0
image: victoriametrics/victoria-metrics:v1.106.1
ports:
- 8428:8428
volumes:
@ -74,7 +79,7 @@ services:
# depending on the requested path.
vmauth:
container_name: vmauth
image: victoriametrics/vmauth:v1.106.0
image: victoriametrics/vmauth:v1.106.1
depends_on:
- "victoriametrics"
- "victorialogs"
@ -91,7 +96,7 @@ services:
# vmalert executes alerting and recording rules according to given rule type.
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.106.0
image: victoriametrics/vmalert:v1.106.1
depends_on:
- "vmauth"
- "alertmanager"
@ -104,6 +109,8 @@ services:
- ./rules/alerts-health.yml:/etc/alerts/alerts-health.yml
- ./rules/alerts-vmagent.yml:/etc/alerts/alerts-vmagent.yml
- ./rules/alerts-vmalert.yml:/etc/alerts/alerts-vmalert.yml
# vlogs rule
- ./rules/vlogs-example-alerts.yml:/etc/alerts/vlogs-example-alerts.yml
command:
- "--datasource.url=http://vmauth:8427/"
- "--remoteRead.url=http://victoriametrics:8428/"

View File

@ -4,7 +4,7 @@ services:
# And forward them to --remoteWrite.url
vmagent:
container_name: vmagent
image: victoriametrics/vmagent:v1.106.0
image: victoriametrics/vmagent:v1.106.1
depends_on:
- "victoriametrics"
ports:
@ -22,7 +22,7 @@ services:
# storing metrics and serve read requests.
victoriametrics:
container_name: victoriametrics
image: victoriametrics/victoria-metrics:v1.106.0
image: victoriametrics/victoria-metrics:v1.106.1
ports:
- 8428:8428
- 8089:8089
@ -65,7 +65,7 @@ services:
# vmalert executes alerting and recording rules
vmalert:
container_name: vmalert
image: victoriametrics/vmalert:v1.106.0
image: victoriametrics/vmalert:v1.106.1
depends_on:
- "victoriametrics"
- "alertmanager"

View File

@ -1,33 +0,0 @@
[INPUT]
name tail
path /var/lib/docker/containers/**/*.log
path_key path
multiline.parser docker, cri
Parser docker
Docker_Mode On
[INPUT]
Name syslog
Listen 0.0.0.0
Port 5140
Parser syslog-rfc3164
Mode tcp
[SERVICE]
Flush 1
Parsers_File parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_PORT 2020
[Output]
Name http
Match *
host victorialogs
port 9428
compress gzip
uri /insert/jsonline?_stream_fields=stream,path&_msg_field=log&_time_field=date
format json_lines
json_date_format iso8601
header AccountID 0
header ProjectID 0

View File

@ -18,25 +18,25 @@ groups:
Check vmalert's logs for the detailed error message."
- alert: AlertingRulesError
expr: sum(increase(vmalert_alerting_rules_errors_total[5m])) without(alertname, id) > 0
expr: sum(increase(vmalert_alerting_rules_errors_total[5m])) without(id) > 0
for: 5m
labels:
severity: warning
annotations:
dashboard: "http://localhost:3000/d/LzldHAVnz?viewPanel=13&var-instance={{ $labels.instance }}&var-file={{ $labels.file }}&var-group={{ $labels.group }}"
summary: "Alerting rules are failing for vmalert instance {{ $labels.instance }}"
description: "Alerting rules execution is failing for group \"{{ $labels.group }}\" in file \"{{ $labels.file }}\".
description: "Alerting rules execution is failing for \"{{ $labels.alertname }}\" from group \"{{ $labels.group }}\" in file \"{{ $labels.file }}\".
Check vmalert's logs for the detailed error message."
- alert: RecordingRulesError
expr: sum(increase(vmalert_recording_rules_errors_total[5m])) without(recording, id) > 0
expr: sum(increase(vmalert_recording_rules_errors_total[5m])) without(id) > 0
for: 5m
labels:
severity: warning
annotations:
dashboard: "http://localhost:3000/d/LzldHAVnz?viewPanel=30&var-instance={{ $labels.instance }}&var-file={{ $labels.file }}&var-group={{ $labels.group }}"
summary: "Recording rules are failing for vmalert instance {{ $labels.instance }}"
description: "Recording rules execution is failing for group \"{{ $labels.group }}\" in file \"{{ $labels.file }}\".
description: "Recording rules execution is failing for \"{{ $labels.recording }}\" from group \"{{ $labels.group }}\" in file \"{{ $labels.file }}\".
Check vmalert's logs for the detailed error message."
- alert: RecordingRulesNoData

View File

@ -0,0 +1,121 @@
# This file provides a recommended list of alerts to monitor the health of VictoriaMetrics Anomaly Detection (vmanomaly).
# Note: The alerts below are general recommendations and may require customization,
# including threshold adjustments, to suit the specifics of your setup.
groups:
# Note - Adjust the `job` filter to match your specific setup.
# By default, the `job` label for vmanomaly in push-based self-monitoring mode is set to `vmanomaly`.
# However, this can be overridden using additional labels. For further details, refer to the example here:
# https://docs.victoriametrics.com/anomaly-detection/components/monitoring/?highlight=extra_labels#monitoring-section-config-example
- name: vmanomaly-health
rules:
- alert: TooManyRestarts
expr: changes(process_start_time_seconds{job=~".*vmanomaly.*"}[15m]) > 2
labels:
severity: critical
annotations:
summary: "{{ $labels.job }} too many restarts (instance {{ $labels.instance }})"
description: |
Job {{ $labels.job }} (instance {{ $labels.instance }}) has restarted more than twice in the last 15 minutes.
It might be crashlooping. Please check the logs for more details.
Additionally, refer to the "r:errors" value in the "Instance Overview" section of the self-monitoring Grafana dashboard.
# Works only if you use Prometheus scraping (pull model)
- alert: ServiceDown
expr: up{job=~".*vmanomaly.*"} == 0
for: 5m
labels:
severity: critical
annotations:
summary: "Service {{ $labels.job }} is down on {{ $labels.instance }}"
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5m"
- alert: ProcessNearFDLimits
expr: (process_max_fds{job=~".*vmanomaly.*"} - process_open_fds{job=~".*vmanomaly.*"}) < 100
for: 5m
labels:
severity: critical
annotations:
summary: "Number of free file descriptors is less than 100 for \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") for the last 5m"
description: |
Exhausting the OS file descriptors limit can cause severe degradation of the process.
Consider increasing the limit as soon as possible.
- alert: TooHighCPUUsage
expr: >
sum(rate(process_cpu_seconds_total{job=~".*vmanomaly.*"}[5m])) by (job, instance) /
sum(vmanomaly_cpu_cores_available{job=~".*vmanomaly.*"}[5m]) by (job, instance) > 0.9
for: 5m
labels:
severity: critical
annotations:
summary: "More than 90% of CPU is used by \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") during the last 5m"
description: >
Too high CPU usage may be a sign of insufficient resources and may make the process unstable.
Consider either increasing available CPU resources or decreasing the load on the process.
- alert: TooHighMemoryUsage
expr: (min_over_time(process_resident_memory_bytes[10m]) / vmanomaly_available_memory_bytes) > 0.85
for: 5m
labels:
severity: critical
annotations:
summary: "It is more than 85% of memory used by \"{{ $labels.job }}\"(\"{{ $labels.instance }}\")"
description: |
Too high memory usage may result in multiple issues such as OOMs or degraded performance.
For example, it can be caused by a high churn rate in your input data.
Consider either increasing available memory or decreasing the load on the process.
- name: vmanomaly-issues
rules:
- alert: ServiceErrorsDetected
expr: sum(increase(vmanomaly_model_run_errors_total{job=~".*vmanomaly.*"}[5m])) by (job, instance, stage) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Model Run Errors in \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") stage: {{ $labels.stage }} during the last 5m"
description: >
Errors in the service may indicate a problem with the service itself or its dependencies.
Investigate the logs for more details.
- alert: SkippedModelRunsDetected
expr: sum(increase(vmanomaly_model_runs_skipped_total{job=~".*vmanomaly.*"}[5m])) by (job, instance, stage) > 0
for: 5m
labels:
severity: warning
annotations:
summary: "Skipped Model Runs in \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") stage: {{ $labels.stage }} during the last 5m"
description: >
Skipped model runs may indicate issues like:
1. No new or valid data is available for the current run.
2. The presence of new time series that do not have a trained model yet.
3. No new (or valid) datapoints produced during inference.
Investigate the logs for more details.
- alert: HighReadErrorRate
expr: >
(
sum(increase(vmanomaly_reader_responses_total{job=~".*vmanomaly.*", code=~"2.."}[5m])) by (job, instance, url) /
sum(increase(vmanomaly_reader_responses_total{job=~".*vmanomaly.*"}[5m])) by (job, instance, url)
) < 0.95
for: 5m
labels:
severity: warning
annotations:
summary: "High error rate in read requests for \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") for url: {{ $labels.url }} during the last 5m"
description: >
Reading errors may indicate issues with the input data source, server-side constraint violations, security, or network issues.
Investigate the logs for more details.
- alert: HighWriteErrorRate
expr: >
(
sum(increase(vmanomaly_writer_responses_total{job=~".*vmanomaly.*", code=~"2.."}[5m])) by (job, instance, url) /
sum(increase(vmanomaly_writer_responses_total{job=~".*vmanomaly.*"}[5m])) by (job, instance, url)
) < 0.95
for: 5m
labels:
severity: warning
annotations:
summary: "High error rate in write requests for \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") for url: {{ $labels.url }} during the last 5m"
description: >
Writing errors may indicate issues with the destination source, server-side constraint violations, security, or network issues.
Investigate the logs for more details.

View File

@ -1,5 +1,5 @@
groups:
- name: TestGroup
- name: log-rules
type: vlogs
interval: 1m
rules:

View File

@ -0,0 +1,41 @@
api:
enabled: true
address: 0.0.0.0:8686
sources:
docker:
type: docker_logs
demo:
type: demo_logs
format: apache_common
interval: 10
vector_metrics:
type: internal_metrics
transforms:
msg_parser:
type: remap
inputs: [docker]
source: |
.message = parse_json(.message) ?? .message
sinks:
elasticsearch:
type: elasticsearch
inputs: [demo, msg_parser]
endpoints: [http://victorialogs:9428/insert/elasticsearch/]
mode: bulk
api_version: v8
compression: gzip
healthcheck:
enabled: false
request:
headers:
VL-Stream-Fields: source_type,label.com.docker.compose.service
VL-Time-Field: timestamp
VL-Msg-Field: message,log
AccountID: "0"
ProjectID: "0"
victoriametrics:
type: prometheus_remote_write
endpoint: http://victoriametrics:8428/api/v1/write
inputs: [vector_metrics]
healthcheck:
enabled: false
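
Since the `api` section above enables Vector's API on port 8686, a quick liveness check is possible; a sketch assuming the default `/health` endpoint:

```sh
# Assumed check against the Vector API exposed on 8686 in the compose file.
curl http://localhost:8686/health
```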

View File

@ -18,7 +18,7 @@ services:
retries: 10
dd-logs:
image: docker.io/victoriametrics/vmauth:v1.106.0
image: docker.io/victoriametrics/vmauth:v1.106.1
restart: on-failure
volumes:
- ./:/etc/vmauth

View File

@ -2,7 +2,7 @@ include:
- ../compose-base.yml
services:
vector:
image: docker.io/timberio/vector:0.40.0-distroless-static
image: docker.io/timberio/vector:0.42.X-distroless-libc
restart: on-failure
volumes:
- type: bind

Some files were not shown because too many files have changed in this diff.