2020-02-23 12:35:47 +01:00
|
|
|
package promscrape
|
|
|
|
|
|
|
|
import (
|
2020-11-04 16:12:05 +01:00
|
|
|
"flag"
|
2020-02-23 12:35:47 +01:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2020-12-14 12:36:48 +01:00
|
|
|
"net/http"
|
2022-04-19 17:26:21 +02:00
|
|
|
"regexp"
|
2020-02-23 12:35:47 +01:00
|
|
|
"sort"
|
2020-12-14 13:02:57 +01:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
2020-02-23 12:35:47 +01:00
|
|
|
"sync"
|
|
|
|
"time"
|
2022-02-03 17:57:36 +01:00
|
|
|
"unsafe"
|
2020-10-20 20:44:59 +02:00
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
|
2022-04-20 14:21:12 +02:00
|
|
|
xxhash "github.com/cespare/xxhash/v2"
|
2020-02-23 12:35:47 +01:00
|
|
|
)
|
|
|
|
|
2021-03-15 20:59:25 +01:00
|
|
|
// maxDroppedTargets limits how many dropped targets are kept in droppedTargetsMap
// (see droppedTargets.Register) and therefore how many are shown at /api/v1/targets.
var maxDroppedTargets = flag.Int("promscrape.maxDroppedTargets", 1000, "The maximum number of droppedTargets to show at /api/v1/targets page. "+
	"Increase this value if your setup drops more scrape targets during relabeling and you need investigating labels for all the dropped targets. "+
	"Note that the increased number of tracked dropped targets may result in increased memory usage")
|
|
|
|
|
2020-02-23 12:35:47 +01:00
|
|
|
// tsmGlobal tracks the status of all the currently registered scrape targets.
var tsmGlobal = newTargetStatusMap()
|
|
|
|
|
2022-02-03 17:57:36 +01:00
|
|
|
// WriteTargetResponse serves requests to /target_response?id=<id>
|
|
|
|
//
|
|
|
|
// It fetches response for the given target id and returns it.
|
|
|
|
func WriteTargetResponse(w http.ResponseWriter, r *http.Request) error {
|
|
|
|
targetID := r.FormValue("id")
|
|
|
|
sw := tsmGlobal.getScrapeWorkByTargetID(targetID)
|
|
|
|
if sw == nil {
|
|
|
|
return fmt.Errorf("cannot find target for id=%s", targetID)
|
|
|
|
}
|
|
|
|
data, err := sw.getTargetResponse()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("cannot fetch response from id=%s: %w", targetID, err)
|
|
|
|
}
|
|
|
|
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
|
|
|
_, err = w.Write(data)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-12-14 13:02:57 +01:00
|
|
|
// WriteHumanReadableTargetsStatus writes human-readable status for all the scrape targets to w according to r.
|
|
|
|
func WriteHumanReadableTargetsStatus(w http.ResponseWriter, r *http.Request) {
|
|
|
|
showOriginalLabels, _ := strconv.ParseBool(r.FormValue("show_original_labels"))
|
|
|
|
showOnlyUnhealthy, _ := strconv.ParseBool(r.FormValue("show_only_unhealthy"))
|
2022-04-19 17:26:21 +02:00
|
|
|
endpointSearch := strings.TrimSpace(r.FormValue("endpoint_search"))
|
|
|
|
labelSearch := strings.TrimSpace(r.FormValue("label_search"))
|
2020-12-14 13:02:57 +01:00
|
|
|
if accept := r.Header.Get("Accept"); strings.Contains(accept, "text/html") {
|
2020-12-14 12:36:48 +01:00
|
|
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
2022-04-19 17:26:21 +02:00
|
|
|
tsmGlobal.WriteTargetsHTML(w, showOnlyUnhealthy, endpointSearch, labelSearch)
|
2020-12-14 13:02:57 +01:00
|
|
|
} else {
|
|
|
|
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
2022-04-19 17:26:21 +02:00
|
|
|
tsmGlobal.WriteTargetsPlain(w, showOriginalLabels, showOnlyUnhealthy, endpointSearch, labelSearch)
|
2020-12-14 12:36:48 +01:00
|
|
|
}
|
2020-02-23 12:35:47 +01:00
|
|
|
}
|
|
|
|
|
2020-10-20 20:44:59 +02:00
|
|
|
// WriteAPIV1Targets writes /api/v1/targets to w according to https://prometheus.io/docs/prometheus/latest/querying/api/#targets
|
|
|
|
func WriteAPIV1Targets(w io.Writer, state string) {
|
|
|
|
if state == "" {
|
|
|
|
state = "any"
|
|
|
|
}
|
|
|
|
fmt.Fprintf(w, `{"status":"success","data":{"activeTargets":`)
|
|
|
|
if state == "active" || state == "any" {
|
|
|
|
tsmGlobal.WriteActiveTargetsJSON(w)
|
|
|
|
} else {
|
|
|
|
fmt.Fprintf(w, `[]`)
|
|
|
|
}
|
|
|
|
fmt.Fprintf(w, `,"droppedTargets":`)
|
|
|
|
if state == "dropped" || state == "any" {
|
|
|
|
droppedTargetsMap.WriteDroppedTargetsJSON(w)
|
|
|
|
} else {
|
|
|
|
fmt.Fprintf(w, `[]`)
|
|
|
|
}
|
|
|
|
fmt.Fprintf(w, `}}`)
|
|
|
|
}
|
|
|
|
|
2020-02-23 12:35:47 +01:00
|
|
|
// targetStatusMap tracks the current status for all the registered scrape targets.
type targetStatusMap struct {
	// mu protects m and jobNames.
	mu sync.Mutex
	// m maps each registered scrapeWork to its most recent status.
	m map[*scrapeWork]*targetStatus
	// jobNames holds the registered job names; it is used by getTargetsStatusByJob
	// for detecting jobs without any targets.
	jobNames []string
}
|
|
|
|
|
|
|
|
func newTargetStatusMap() *targetStatusMap {
|
|
|
|
return &targetStatusMap{
|
2022-02-03 17:57:36 +01:00
|
|
|
m: make(map[*scrapeWork]*targetStatus),
|
2020-02-23 12:35:47 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (tsm *targetStatusMap) Reset() {
|
|
|
|
tsm.mu.Lock()
|
2022-02-03 17:57:36 +01:00
|
|
|
tsm.m = make(map[*scrapeWork]*targetStatus)
|
2020-02-23 12:35:47 +01:00
|
|
|
tsm.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2021-06-18 09:53:10 +02:00
|
|
|
func (tsm *targetStatusMap) registerJobNames(jobNames []string) {
|
|
|
|
tsm.mu.Lock()
|
|
|
|
tsm.jobNames = append(tsm.jobNames[:0], jobNames...)
|
|
|
|
tsm.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2022-02-03 17:57:36 +01:00
|
|
|
func (tsm *targetStatusMap) Register(sw *scrapeWork) {
|
2020-03-11 02:19:56 +01:00
|
|
|
tsm.mu.Lock()
|
2020-12-17 13:30:33 +01:00
|
|
|
tsm.m[sw] = &targetStatus{
|
|
|
|
sw: sw,
|
2020-03-11 02:19:56 +01:00
|
|
|
}
|
|
|
|
tsm.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2022-02-03 17:57:36 +01:00
|
|
|
func (tsm *targetStatusMap) Unregister(sw *scrapeWork) {
|
2020-03-11 02:19:56 +01:00
|
|
|
tsm.mu.Lock()
|
2020-12-17 13:30:33 +01:00
|
|
|
delete(tsm.m, sw)
|
2020-03-11 02:19:56 +01:00
|
|
|
tsm.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2022-02-03 17:57:36 +01:00
|
|
|
func (tsm *targetStatusMap) Update(sw *scrapeWork, group string, up bool, scrapeTime, scrapeDuration int64, samplesScraped int, err error) {
|
2020-02-23 12:35:47 +01:00
|
|
|
tsm.mu.Lock()
|
2020-12-17 13:30:33 +01:00
|
|
|
ts := tsm.m[sw]
|
|
|
|
if ts == nil {
|
|
|
|
ts = &targetStatus{
|
|
|
|
sw: sw,
|
|
|
|
}
|
|
|
|
tsm.m[sw] = ts
|
2020-02-23 12:35:47 +01:00
|
|
|
}
|
2020-12-17 13:30:33 +01:00
|
|
|
ts.up = up
|
|
|
|
ts.scrapeGroup = group
|
|
|
|
ts.scrapeTime = scrapeTime
|
|
|
|
ts.scrapeDuration = scrapeDuration
|
2021-06-14 13:01:13 +02:00
|
|
|
ts.samplesScraped = samplesScraped
|
2022-02-03 19:22:35 +01:00
|
|
|
ts.scrapesTotal++
|
|
|
|
if !up {
|
|
|
|
ts.scrapesFailed++
|
|
|
|
}
|
2020-12-17 13:30:33 +01:00
|
|
|
ts.err = err
|
2020-02-23 12:35:47 +01:00
|
|
|
tsm.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2022-02-03 17:57:36 +01:00
|
|
|
func (tsm *targetStatusMap) getScrapeWorkByTargetID(targetID string) *scrapeWork {
|
|
|
|
tsm.mu.Lock()
|
|
|
|
defer tsm.mu.Unlock()
|
|
|
|
for sw := range tsm.m {
|
|
|
|
if getTargetID(sw) == targetID {
|
|
|
|
return sw
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// getTargetID returns an id for sw derived from its memory address,
// formatted as a 16-digit zero-padded hex string. The id is stable only
// for the lifetime of the sw object.
func getTargetID(sw *scrapeWork) string {
	return fmt.Sprintf("%016x", uintptr(unsafe.Pointer(sw)))
}
|
|
|
|
|
2020-07-13 20:52:03 +02:00
|
|
|
// StatusByGroup returns the number of targets with status==up
|
|
|
|
// for the given group name
|
|
|
|
func (tsm *targetStatusMap) StatusByGroup(group string, up bool) int {
|
|
|
|
var count int
|
|
|
|
tsm.mu.Lock()
|
|
|
|
for _, st := range tsm.m {
|
|
|
|
if st.scrapeGroup == group && st.up == up {
|
|
|
|
count++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
tsm.mu.Unlock()
|
|
|
|
return count
|
|
|
|
}
|
|
|
|
|
2020-10-20 20:44:59 +02:00
|
|
|
// WriteActiveTargetsJSON writes `activeTargets` contents to w according to https://prometheus.io/docs/prometheus/latest/querying/api/#targets
func (tsm *targetStatusMap) WriteActiveTargetsJSON(w io.Writer) {
	tsm.mu.Lock()
	type keyStatus struct {
		key string
		st  targetStatus
	}
	// Snapshot the statuses under the lock so that JSON generation below
	// doesn't block concurrent Register/Update/Unregister calls.
	kss := make([]keyStatus, 0, len(tsm.m))
	for sw, st := range tsm.m {
		key := promLabelsString(sw.Config.OriginalLabels)
		kss = append(kss, keyStatus{
			key: key,
			st:  *st,
		})
	}
	tsm.mu.Unlock()

	// Sort by the string form of the original labels for deterministic output.
	sort.Slice(kss, func(i, j int) bool {
		return kss[i].key < kss[j].key
	})
	fmt.Fprintf(w, `[`)
	for i, ks := range kss {
		st := ks.st
		fmt.Fprintf(w, `{"discoveredLabels":`)
		writeLabelsJSON(w, st.sw.Config.OriginalLabels)
		fmt.Fprintf(w, `,"labels":`)
		labelsFinalized := promrelabel.FinalizeLabels(nil, st.sw.Config.Labels)
		writeLabelsJSON(w, labelsFinalized)
		fmt.Fprintf(w, `,"scrapePool":%q`, st.sw.Config.Job())
		fmt.Fprintf(w, `,"scrapeUrl":%q`, st.sw.Config.ScrapeURL)
		errMsg := ""
		if st.err != nil {
			errMsg = st.err.Error()
		}
		fmt.Fprintf(w, `,"lastError":%q`, errMsg)
		// scrapeTime is in milliseconds since epoch; render it as RFC3339.
		fmt.Fprintf(w, `,"lastScrape":%q`, time.Unix(st.scrapeTime/1000, (st.scrapeTime%1000)*1e6).Format(time.RFC3339Nano))
		// scrapeDuration is in milliseconds; the API reports seconds.
		fmt.Fprintf(w, `,"lastScrapeDuration":%g`, (time.Millisecond * time.Duration(st.scrapeDuration)).Seconds())
		fmt.Fprintf(w, `,"lastSamplesScraped":%d`, st.samplesScraped)
		state := "up"
		if !st.up {
			state = "down"
		}
		fmt.Fprintf(w, `,"health":%q}`, state)
		if i+1 < len(kss) {
			fmt.Fprintf(w, `,`)
		}
	}
	fmt.Fprintf(w, `]`)
}
|
|
|
|
|
|
|
|
func writeLabelsJSON(w io.Writer, labels []prompbmarshal.Label) {
|
|
|
|
fmt.Fprintf(w, `{`)
|
|
|
|
for i, label := range labels {
|
|
|
|
fmt.Fprintf(w, "%q:%q", label.Name, label.Value)
|
|
|
|
if i+1 < len(labels) {
|
|
|
|
fmt.Fprintf(w, `,`)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fmt.Fprintf(w, `}`)
|
|
|
|
}
|
|
|
|
|
2020-02-23 12:35:47 +01:00
|
|
|
// targetStatus holds the most recent scrape state for a single target.
type targetStatus struct {
	sw *scrapeWork
	// up is true if the last scrape succeeded.
	up bool
	scrapeGroup string
	// scrapeTime is the timestamp of the last scrape in milliseconds since the Unix epoch
	// (see the /1000 and %1000 conversions in WriteActiveTargetsJSON).
	scrapeTime int64
	// scrapeDuration is the duration of the last scrape in milliseconds.
	scrapeDuration int64
	// samplesScraped is the number of samples obtained during the last scrape.
	samplesScraped int
	// scrapesTotal counts all scrape attempts; scrapesFailed counts those with up==false.
	scrapesTotal  int
	scrapesFailed int
	// err is the error from the last scrape; nil on success.
	err error
}
|
|
|
|
|
|
|
|
func (st *targetStatus) getDurationFromLastScrape() time.Duration {
|
|
|
|
return time.Since(time.Unix(st.scrapeTime/1000, (st.scrapeTime%1000)*1e6))
|
|
|
|
}
|
2020-10-20 20:44:59 +02:00
|
|
|
|
|
|
|
// droppedTargets tracks targets dropped during relabeling, keyed by a hash of their labels.
type droppedTargets struct {
	// mu protects m and lastCleanupTime.
	mu sync.Mutex
	// m maps labelsHash(originalLabels) to the corresponding dropped target entry.
	m map[uint64]droppedTarget
	// lastCleanupTime is the unix timestamp (seconds) of the last expired-entry cleanup in Register.
	lastCleanupTime uint64
}
|
|
|
|
|
|
|
|
// droppedTarget is a single target dropped during relabeling.
type droppedTarget struct {
	// originalLabels are the labels of the target as passed to droppedTargets.Register.
	originalLabels []prompbmarshal.Label
	// deadline is the unix timestamp (seconds) after which this entry may be removed.
	deadline uint64
}
|
|
|
|
|
|
|
|
func (dt *droppedTargets) Register(originalLabels []prompbmarshal.Label) {
|
2022-04-20 14:21:12 +02:00
|
|
|
// It is better to have hash collisions instead of spending additional CPU on promLabelsString() call.
|
|
|
|
key := labelsHash(originalLabels)
|
2020-10-20 20:44:59 +02:00
|
|
|
currentTime := fasttime.UnixTimestamp()
|
|
|
|
dt.mu.Lock()
|
2020-11-04 16:03:43 +01:00
|
|
|
if k, ok := dt.m[key]; ok {
|
|
|
|
k.deadline = currentTime + 10*60
|
|
|
|
dt.m[key] = k
|
|
|
|
} else if len(dt.m) < *maxDroppedTargets {
|
|
|
|
dt.m[key] = droppedTarget{
|
|
|
|
originalLabels: originalLabels,
|
|
|
|
deadline: currentTime + 10*60,
|
|
|
|
}
|
2020-10-20 20:44:59 +02:00
|
|
|
}
|
|
|
|
if currentTime-dt.lastCleanupTime > 60 {
|
|
|
|
for k, v := range dt.m {
|
|
|
|
if currentTime > v.deadline {
|
|
|
|
delete(dt.m, k)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
dt.lastCleanupTime = currentTime
|
|
|
|
}
|
|
|
|
dt.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2022-04-20 14:21:12 +02:00
|
|
|
func labelsHash(labels []prompbmarshal.Label) uint64 {
|
|
|
|
d := xxhashPool.Get().(*xxhash.Digest)
|
|
|
|
for _, label := range labels {
|
|
|
|
_, _ = d.WriteString(label.Name)
|
|
|
|
_, _ = d.WriteString(label.Value)
|
|
|
|
}
|
|
|
|
h := d.Sum64()
|
|
|
|
d.Reset()
|
|
|
|
xxhashPool.Put(d)
|
|
|
|
return h
|
|
|
|
}
|
|
|
|
|
|
|
|
// xxhashPool reuses xxhash.Digest objects across labelsHash calls
// to avoid a per-call allocation.
var xxhashPool = &sync.Pool{
	New: func() interface{} {
		return xxhash.New()
	},
}
|
|
|
|
|
2020-10-20 20:44:59 +02:00
|
|
|
// WriteDroppedTargetsJSON writes `droppedTargets` contents to w according to https://prometheus.io/docs/prometheus/latest/querying/api/#targets
|
|
|
|
func (dt *droppedTargets) WriteDroppedTargetsJSON(w io.Writer) {
|
|
|
|
dt.mu.Lock()
|
|
|
|
type keyStatus struct {
|
|
|
|
key string
|
|
|
|
originalLabels []prompbmarshal.Label
|
|
|
|
}
|
|
|
|
kss := make([]keyStatus, 0, len(dt.m))
|
|
|
|
for _, v := range dt.m {
|
|
|
|
key := promLabelsString(v.originalLabels)
|
|
|
|
kss = append(kss, keyStatus{
|
|
|
|
key: key,
|
|
|
|
originalLabels: v.originalLabels,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
dt.mu.Unlock()
|
|
|
|
|
|
|
|
sort.Slice(kss, func(i, j int) bool {
|
|
|
|
return kss[i].key < kss[j].key
|
|
|
|
})
|
|
|
|
fmt.Fprintf(w, `[`)
|
|
|
|
for i, ks := range kss {
|
|
|
|
fmt.Fprintf(w, `{"discoveredLabels":`)
|
|
|
|
writeLabelsJSON(w, ks.originalLabels)
|
|
|
|
fmt.Fprintf(w, `}`)
|
|
|
|
if i+1 < len(kss) {
|
|
|
|
fmt.Fprintf(w, `,`)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fmt.Fprintf(w, `]`)
|
|
|
|
}
|
|
|
|
|
|
|
|
// droppedTargetsMap is the global registry of targets dropped during relabeling.
var droppedTargetsMap = &droppedTargets{
	m: make(map[uint64]droppedTarget),
}
|
2020-12-14 12:36:48 +01:00
|
|
|
|
|
|
|
// jobTargetsStatuses aggregates the target statuses for a single scrape job.
type jobTargetsStatuses struct {
	// job is the job name.
	job string
	// upCount is the number of targets with up==true.
	upCount int
	// targetsTotal is the total number of targets for the job.
	targetsTotal int
	// targetsStatus holds the status of every target of the job.
	targetsStatus []targetStatus
}
|
|
|
|
|
2022-04-19 17:26:21 +02:00
|
|
|
func (tsm *targetStatusMap) getTargetsStatusByJob(endpointSearch, labelSearch string) ([]jobTargetsStatuses, []string, error) {
|
2020-12-14 12:36:48 +01:00
|
|
|
byJob := make(map[string][]targetStatus)
|
|
|
|
tsm.mu.Lock()
|
|
|
|
for _, st := range tsm.m {
|
2022-02-03 17:57:36 +01:00
|
|
|
job := st.sw.Config.jobNameOriginal
|
2020-12-14 12:36:48 +01:00
|
|
|
byJob[job] = append(byJob[job], *st)
|
|
|
|
}
|
2021-06-18 09:53:10 +02:00
|
|
|
jobNames := append([]string{}, tsm.jobNames...)
|
2020-12-14 12:36:48 +01:00
|
|
|
tsm.mu.Unlock()
|
|
|
|
|
|
|
|
var jts []jobTargetsStatuses
|
|
|
|
for job, statuses := range byJob {
|
|
|
|
sort.Slice(statuses, func(i, j int) bool {
|
2022-02-03 17:57:36 +01:00
|
|
|
return statuses[i].sw.Config.ScrapeURL < statuses[j].sw.Config.ScrapeURL
|
2020-12-14 12:36:48 +01:00
|
|
|
})
|
|
|
|
ups := 0
|
2022-02-03 19:22:35 +01:00
|
|
|
var targetsStatuses []targetStatus
|
2020-12-14 12:36:48 +01:00
|
|
|
for _, ts := range statuses {
|
|
|
|
if ts.up {
|
|
|
|
ups++
|
|
|
|
}
|
2022-02-03 19:22:35 +01:00
|
|
|
targetsStatuses = append(targetsStatuses, ts)
|
2020-12-14 12:36:48 +01:00
|
|
|
}
|
|
|
|
jts = append(jts, jobTargetsStatuses{
|
|
|
|
job: job,
|
|
|
|
upCount: ups,
|
|
|
|
targetsTotal: len(statuses),
|
|
|
|
targetsStatus: targetsStatuses,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
sort.Slice(jts, func(i, j int) bool {
|
|
|
|
return jts[i].job < jts[j].job
|
|
|
|
})
|
2021-06-18 09:53:10 +02:00
|
|
|
emptyJobs := getEmptyJobs(jts, jobNames)
|
2022-04-19 17:26:21 +02:00
|
|
|
var err error
|
|
|
|
jts, err = filterTargets(jts, endpointSearch, labelSearch)
|
|
|
|
if len(endpointSearch) > 0 || len(labelSearch) > 0 {
|
|
|
|
// Do not show empty jobs if target filters are set.
|
|
|
|
emptyJobs = nil
|
|
|
|
}
|
|
|
|
return jts, emptyJobs, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func filterTargetsByEndpoint(jts []jobTargetsStatuses, searchQuery string) ([]jobTargetsStatuses, error) {
|
|
|
|
if searchQuery == "" {
|
|
|
|
return jts, nil
|
|
|
|
}
|
|
|
|
finder, err := regexp.Compile(searchQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("cannot parse %s: %w", searchQuery, err)
|
|
|
|
}
|
|
|
|
var jtsFiltered []jobTargetsStatuses
|
|
|
|
for _, job := range jts {
|
|
|
|
var tss []targetStatus
|
|
|
|
for _, ts := range job.targetsStatus {
|
|
|
|
if finder.MatchString(ts.sw.Config.ScrapeURL) {
|
|
|
|
tss = append(tss, ts)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(tss) == 0 {
|
|
|
|
// Skip jobs with zero targets after filtering, so users could see only the requested targets
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
job.targetsStatus = tss
|
|
|
|
jtsFiltered = append(jtsFiltered, job)
|
|
|
|
}
|
|
|
|
return jtsFiltered, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func filterTargetsByLabels(jts []jobTargetsStatuses, searchQuery string) ([]jobTargetsStatuses, error) {
|
|
|
|
if searchQuery == "" {
|
|
|
|
return jts, nil
|
|
|
|
}
|
|
|
|
var ie promrelabel.IfExpression
|
|
|
|
if err := ie.Parse(searchQuery); err != nil {
|
|
|
|
return nil, fmt.Errorf("cannot parse %s: %w", searchQuery, err)
|
|
|
|
}
|
|
|
|
var jtsFiltered []jobTargetsStatuses
|
|
|
|
for _, job := range jts {
|
|
|
|
var tss []targetStatus
|
|
|
|
for _, ts := range job.targetsStatus {
|
|
|
|
if ie.Match(ts.sw.Config.Labels) {
|
|
|
|
tss = append(tss, ts)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(tss) == 0 {
|
|
|
|
// Skip jobs with zero targets after filtering, so users could see only the requested targets
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
job.targetsStatus = tss
|
|
|
|
jtsFiltered = append(jtsFiltered, job)
|
|
|
|
}
|
|
|
|
return jtsFiltered, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func filterTargets(jts []jobTargetsStatuses, endpointQuery, labelQuery string) ([]jobTargetsStatuses, error) {
|
|
|
|
var err error
|
|
|
|
jts, err = filterTargetsByEndpoint(jts, endpointQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
jts, err = filterTargetsByLabels(jts, labelQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return jts, nil
|
2021-06-18 09:53:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func getEmptyJobs(jts []jobTargetsStatuses, jobNames []string) []string {
|
|
|
|
jobNamesMap := make(map[string]struct{}, len(jobNames))
|
|
|
|
for _, jobName := range jobNames {
|
|
|
|
jobNamesMap[jobName] = struct{}{}
|
|
|
|
}
|
|
|
|
for i := range jts {
|
|
|
|
delete(jobNamesMap, jts[i].job)
|
|
|
|
}
|
|
|
|
emptyJobs := make([]string, 0, len(jobNamesMap))
|
|
|
|
for k := range jobNamesMap {
|
|
|
|
emptyJobs = append(emptyJobs, k)
|
|
|
|
}
|
|
|
|
sort.Strings(emptyJobs)
|
|
|
|
return emptyJobs
|
2020-12-14 12:36:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTargetsHTML writes targets status grouped by job into writer w in html table,
|
|
|
|
// accepts filter to show only unhealthy targets.
|
2022-04-19 17:26:21 +02:00
|
|
|
func (tsm *targetStatusMap) WriteTargetsHTML(w io.Writer, showOnlyUnhealthy bool, endpointSearch, labelSearch string) {
|
|
|
|
jss, emptyJobs, err := tsm.getTargetsStatusByJob(endpointSearch, labelSearch)
|
|
|
|
WriteTargetsResponseHTML(w, jss, emptyJobs, showOnlyUnhealthy, endpointSearch, labelSearch, err)
|
2020-12-14 12:36:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTargetsPlain writes targets grouped by job into writer w in plain text,
|
|
|
|
// accept filter to show original labels.
|
2022-04-19 17:26:21 +02:00
|
|
|
func (tsm *targetStatusMap) WriteTargetsPlain(w io.Writer, showOriginalLabels, showOnlyUnhealthy bool, endpointSearch, labelSearch string) {
|
|
|
|
jss, emptyJobs, err := tsm.getTargetsStatusByJob(endpointSearch, labelSearch)
|
|
|
|
WriteTargetsResponsePlain(w, jss, emptyJobs, showOriginalLabels, showOnlyUnhealthy, err)
|
2020-12-14 12:36:48 +01:00
|
|
|
}
|