VictoriaMetrics/lib/writeconcurrencylimiter/concurrencylimiter.go

package writeconcurrencylimiter

import (
	"flag"
	"fmt"
	"net/http"
	"runtime"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
	"github.com/VictoriaMetrics/metrics"
)

var (
	maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts; see also -insert.maxQueueDuration")
	maxQueueDuration     = flag.Duration("insert.maxQueueDuration", time.Minute, "The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts")
)
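
// A minimal command-line sketch showing how the flags above may be set
// (the ./victoria-metrics binary name is an assumption; any binary that
// links this package exposes the same flags):
//
//	./victoria-metrics -maxConcurrentInserts=32 -insert.maxQueueDuration=30s
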
// ch is the channel for limiting concurrent calls to Do.
var ch chan struct{}
// Init initializes concurrencylimiter.
//
// Init must be called after flag.Parse call.
func Init() {
	ch = make(chan struct{}, *maxConcurrentInserts)
}
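
// A minimal usage sketch for Init (assumed caller-side code, not part of
// this package); the flags must be parsed before the limiter is sized:
//
//	flag.Parse()
//	writeconcurrencylimiter.Init()
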
// Do calls f with the limited concurrency.
func Do(f func() error) error {
	// Limit the number of concurrent f calls in order to prevent excess
	// memory usage and CPU thrashing.
	select {
	case ch <- struct{}{}:
		err := f()
		<-ch
		return err
	default:
	}

	// All the workers are busy.
	// Sleep for up to *maxQueueDuration.
	concurrencyLimitReached.Inc()
	t := timerpool.Get(*maxQueueDuration)
	select {
	case ch <- struct{}{}:
		timerpool.Put(t)
		err := f()
		<-ch
		return err
	case <-t.C:
		timerpool.Put(t)
		concurrencyLimitTimeout.Inc()
		return &httpserver.ErrorWithStatusCode{
			Err: fmt.Errorf("cannot handle more than %d concurrent inserts during %s; possible solutions: "+
				"increase `-insert.maxQueueDuration`, increase `-maxConcurrentInserts`, increase server capacity", *maxConcurrentInserts, *maxQueueDuration),
			StatusCode: http.StatusServiceUnavailable,
		}
	}
}
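
// A minimal usage sketch for Do (assumed caller-side code; insertRows and
// req are hypothetical stand-ins for the real request processing):
//
//	err := writeconcurrencylimiter.Do(func() error {
//		return insertRows(req)
//	})
//	// On queue timeout err is *httpserver.ErrorWithStatusCode with
//	// StatusCode set to http.StatusServiceUnavailable.
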

var (
	concurrencyLimitReached = metrics.NewCounter(`vm_concurrent_insert_limit_reached_total`)
	concurrencyLimitTimeout = metrics.NewCounter(`vm_concurrent_insert_limit_timeout_total`)

	_ = metrics.NewGauge(`vm_concurrent_insert_capacity`, func() float64 {
		return float64(cap(ch))
	})
	_ = metrics.NewGauge(`vm_concurrent_insert_current`, func() float64 {
		return float64(len(ch))
	})
)