Early init of procfs (#1315)

Minor change to match the naming convention in other collectors.

Initialize the proc or sys FS instance once when each collector is
created, instead of re-creating it for each metric update.

Signed-off-by: Paul Gier <pgier@redhat.com>
Paul Gier 2019-04-10 11:16:12 -05:00 committed by Ben Kochie
parent fbe390709f
commit b1298677aa
8 changed files with 63 additions and 47 deletions
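
The pattern is the same across all eight files: open the proc or sys filesystem once in the collector's constructor, store the handle in a struct field, and reuse it on every scrape. Below is a minimal sketch of that shape, assuming the package-level Collector interface, namespace constant, and procPath flag that appear in the diffs; the exampleCollector type, its metric name, and NewExampleCollector are hypothetical and only illustrate the pattern, they are not part of this commit.

package collector

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/procfs"
)

// exampleCollector is a hypothetical collector that illustrates the pattern
// introduced by this commit: the procfs handle lives in a struct field.
type exampleCollector struct {
	fs   procfs.FS
	desc *prometheus.Desc
}

// NewExampleCollector opens procfs exactly once, at construction time.
func NewExampleCollector() (Collector, error) {
	fs, err := procfs.NewFS(*procPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open procfs: %v", err)
	}
	desc := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "example", "boot_time_seconds"),
		"Hypothetical metric read from /proc/stat.",
		nil, nil,
	)
	return &exampleCollector{fs: fs, desc: desc}, nil
}

// Update reuses the stored handle instead of calling procfs.NewFS on every
// scrape, which is what each collector did before this change.
func (c *exampleCollector) Update(ch chan<- prometheus.Metric) error {
	stat, err := c.fs.NewStat()
	if err != nil {
		return fmt.Errorf("couldn't get stat: %s", err)
	}
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(stat.BootTime))
	return nil
}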

View File

@@ -30,6 +30,7 @@ const (
)
type buddyinfoCollector struct {
fs procfs.FS
desc *prometheus.Desc
}
@@ -44,18 +45,17 @@ func NewBuddyinfoCollector() (Collector, error) {
"Count of free blocks according to size.",
[]string{"node", "zone", "size"}, nil,
)
return &buddyinfoCollector{desc}, nil
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %v", err)
}
return &buddyinfoCollector{fs, desc}, nil
}
// Update calls (*buddyinfoCollector).getBuddyInfo to get the platform specific
// buddyinfo metrics.
func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return fmt.Errorf("failed to open procfs: %v", err)
}
buddyInfo, err := fs.NewBuddyInfo()
buddyInfo, err := c.fs.NewBuddyInfo()
if err != nil {
return fmt.Errorf("couldn't get buddyinfo: %s", err)
}

View File

@@ -26,6 +26,7 @@ import (
)
type cpuCollector struct {
fs procfs.FS
cpu *prometheus.Desc
cpuGuest *prometheus.Desc
cpuCoreThrottle *prometheus.Desc
@@ -38,7 +39,12 @@ func init() {
// NewCPUCollector returns a new Collector exposing kernel/system statistics.
func NewCPUCollector() (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %v", err)
}
return &cpuCollector{
fs: fs,
cpu: nodeCPUSecondsDesc,
cpuGuest: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "guest_seconds_total"),
@@ -149,11 +155,7 @@ func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error
// updateStat reads /proc/stat through procfs and exports cpu related metrics.
func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return fmt.Errorf("failed to open procfs: %v", err)
}
stats, err := fs.NewStat()
stats, err := c.fs.NewStat()
if err != nil {
return err
}

View File

@@ -23,6 +23,7 @@ import (
)
type cpuFreqCollector struct {
fs sysfs.FS
cpuFreq *prometheus.Desc
cpuFreqMin *prometheus.Desc
cpuFreqMax *prometheus.Desc
@@ -37,7 +38,13 @@ func init() {
// NewCPUFreqCollector returns a new Collector exposing kernel/system statistics.
func NewCPUFreqCollector() (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %v", err)
}
return &cpuFreqCollector{
fs: fs,
cpuFreq: prometheus.NewDesc(
prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"),
"Current cpu thread frequency in hertz.",
@@ -73,12 +80,7 @@ func NewCPUFreqCollector() (Collector, error) {
// Update implements Collector and exposes cpu related metrics from /proc/stat and /sys/.../cpu/.
func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return fmt.Errorf("failed to open sysfs: %v", err)
}
cpuFreqs, err := fs.NewSystemCpufreq()
cpuFreqs, err := c.fs.NewSystemCpufreq()
if err != nil {
return err
}

View File

@@ -58,7 +58,7 @@ func newIPVSCollector() (*ipvsCollector, error) {
c.fs, err = procfs.NewFS(*procPath)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to open procfs: %v", err)
}
c.connections = typedDesc{prometheus.NewDesc(

View File

@@ -30,6 +30,7 @@ var (
)
type netClassCollector struct {
fs sysfs.FS
subsystem string
ignoredDevicesPattern *regexp.Regexp
metricDescs map[string]*prometheus.Desc
@@ -41,8 +42,13 @@ func init() {
// NewNetClassCollector returns a new Collector exposing network class stats.
func NewNetClassCollector() (Collector, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, fmt.Errorf("failed to open sysfs: %v", err)
}
pattern := regexp.MustCompile(*netclassIgnoredDevices)
return &netClassCollector{
fs: fs,
subsystem: "network",
ignoredDevicesPattern: pattern,
metricDescs: map[string]*prometheus.Desc{},
@@ -50,7 +56,7 @@ func NewNetClassCollector() (Collector, error) {
}
func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error {
netClass, err := getNetClassInfo(c.ignoredDevicesPattern)
netClass, err := c.getNetClassInfo()
if err != nil {
return fmt.Errorf("could not get net class info: %s", err)
}
@@ -162,19 +168,15 @@ func pushMetric(ch chan<- prometheus.Metric, subsystem string, name string, valu
ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, float64(value), ifaceName)
}
func getNetClassInfo(ignore *regexp.Regexp) (sysfs.NetClass, error) {
fs, err := sysfs.NewFS(*sysPath)
if err != nil {
return nil, err
}
netClass, err := fs.NewNetClass()
func (c *netClassCollector) getNetClassInfo() (sysfs.NetClass, error) {
netClass, err := c.fs.NewNetClass()
if err != nil {
return netClass, fmt.Errorf("error obtaining net class info: %s", err)
}
for device := range netClass {
if ignore.MatchString(device) {
if c.ignoredDevicesPattern.MatchString(device) {
delete(netClass, device)
}
}

View File

@@ -25,6 +25,7 @@ import (
)
type processCollector struct {
fs procfs.FS
threadAlloc *prometheus.Desc
threadLimit *prometheus.Desc
procsState *prometheus.Desc
@@ -38,8 +39,13 @@ func init() {
// NewProcessStatCollector returns a new Collector exposing process data read from the proc filesystem.
func NewProcessStatCollector() (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %v", err)
}
subsystem := "processes"
return &processCollector{
fs: fs,
threadAlloc: prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "threads"),
"Allocated threads in system",
@@ -63,39 +69,35 @@ func NewProcessStatCollector() (Collector, error) {
),
}, nil
}
func (t *processCollector) Update(ch chan<- prometheus.Metric) error {
pids, states, threads, err := getAllocatedThreads()
func (c *processCollector) Update(ch chan<- prometheus.Metric) error {
pids, states, threads, err := c.getAllocatedThreads()
if err != nil {
return fmt.Errorf("unable to retrieve number of allocated threads: %q", err)
}
ch <- prometheus.MustNewConstMetric(t.threadAlloc, prometheus.GaugeValue, float64(threads))
ch <- prometheus.MustNewConstMetric(c.threadAlloc, prometheus.GaugeValue, float64(threads))
maxThreads, err := readUintFromFile(procFilePath("sys/kernel/threads-max"))
if err != nil {
return fmt.Errorf("unable to retrieve limit number of threads: %q", err)
}
ch <- prometheus.MustNewConstMetric(t.threadLimit, prometheus.GaugeValue, float64(maxThreads))
ch <- prometheus.MustNewConstMetric(c.threadLimit, prometheus.GaugeValue, float64(maxThreads))
for state := range states {
ch <- prometheus.MustNewConstMetric(t.procsState, prometheus.GaugeValue, float64(states[state]), state)
ch <- prometheus.MustNewConstMetric(c.procsState, prometheus.GaugeValue, float64(states[state]), state)
}
pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max"))
if err != nil {
return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %q", err)
}
ch <- prometheus.MustNewConstMetric(t.pidUsed, prometheus.GaugeValue, float64(pids))
ch <- prometheus.MustNewConstMetric(t.pidMax, prometheus.GaugeValue, float64(pidM))
ch <- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids))
ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM))
return nil
}
func getAllocatedThreads() (int, map[string]int32, int, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return 0, nil, 0, err
}
p, err := fs.AllProcs()
func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, error) {
p, err := c.fs.AllProcs()
if err != nil {
return 0, nil, 0, err
}

View File

@@ -18,7 +18,8 @@ package collector
import (
"testing"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/prometheus/procfs"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
func TestReadProcessStatus(t *testing.T) {
@@ -26,7 +27,12 @@ func TestReadProcessStatus(t *testing.T) {
t.Fatal(err)
}
want := 1
pids, states, threads, err := getAllocatedThreads()
fs, err := procfs.NewFS(*procPath)
if err != nil {
t.Errorf("failed to open procfs: %v", err)
}
c := processCollector{fs: fs}
pids, states, threads, err := c.getAllocatedThreads()
if err != nil {
t.Fatalf("Cannot retrieve data from procfs getAllocatedThreads function: %v ", err)
}

View File

@@ -24,6 +24,7 @@ import (
)
type statCollector struct {
fs procfs.FS
intr *prometheus.Desc
ctxt *prometheus.Desc
forks *prometheus.Desc
@@ -38,7 +39,12 @@ func init() {
// NewStatCollector returns a new Collector exposing kernel/system statistics.
func NewStatCollector() (Collector, error) {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return nil, fmt.Errorf("failed to open procfs: %v", err)
}
return &statCollector{
fs: fs,
intr: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "intr_total"),
"Total number of interrupts serviced.",
@@ -74,11 +80,7 @@ func NewStatCollector() (Collector, error) {
// Update implements Collector and exposes kernel and system statistics.
func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
fs, err := procfs.NewFS(*procPath)
if err != nil {
return fmt.Errorf("failed to open procfs: %v", err)
}
stats, err := fs.NewStat()
stats, err := c.fs.NewStat()
if err != nil {
return err
}