2023-08-27 18:02:51 +02:00
|
|
|
package runners
|
|
|
|
|
|
|
|
import (
|
2023-08-29 00:51:04 +02:00
|
|
|
"bytes"
|
2023-08-27 18:02:51 +02:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2024-03-10 20:07:19 +01:00
|
|
|
"io"
|
2023-08-27 18:02:51 +02:00
|
|
|
"net/http"
|
2023-08-29 00:51:04 +02:00
|
|
|
"os"
|
2023-08-27 18:02:51 +02:00
|
|
|
"strconv"
|
2023-09-20 02:17:41 +02:00
|
|
|
"sync/atomic"
|
2023-08-27 18:02:51 +02:00
|
|
|
"time"
|
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
"github.com/ansible-semaphore/semaphore/db"
|
|
|
|
|
2024-04-12 12:21:05 +02:00
|
|
|
"github.com/ansible-semaphore/semaphore/db_lib"
|
2024-04-12 12:32:54 +02:00
|
|
|
"github.com/ansible-semaphore/semaphore/pkg/task_logger"
|
2024-04-12 12:21:05 +02:00
|
|
|
"github.com/ansible-semaphore/semaphore/services/tasks"
|
|
|
|
"github.com/ansible-semaphore/semaphore/util"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
)
|
2023-08-27 18:02:51 +02:00
|
|
|
|
|
|
|
// JobPool manages the runner-side task lifecycle: it receives jobs from the
// Semaphore server, queues them, executes them, and reports their progress
// back to the server.
type JobPool struct {
	// logger channel used for putting log records to the database.
	logger chan jobLogRecord

	// register channel used to put tasks to queue.
	register chan *job

	// runningJobs maps task ID to its currently executing job; entries are
	// removed by sendProgress once a job reaches a finished status.
	runningJobs map[int]*runningJob

	// queue holds jobs accepted from the server but not yet started;
	// processed FIFO by the Run loop.
	queue []*job

	//token *string

	// processing is a 0/1 flag manipulated with sync/atomic to ensure only
	// one progress/poll cycle runs at a time (see Run's requestTimer case).
	processing int32
}
|
|
|
|
|
2023-09-20 01:03:58 +02:00
|
|
|
func (p *JobPool) existsInQueue(taskID int) bool {
|
|
|
|
for _, j := range p.queue {
|
|
|
|
if j.job.Task.ID == taskID {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-09-12 19:40:22 +02:00
|
|
|
func (p *JobPool) hasRunningJobs() bool {
|
|
|
|
for _, j := range p.runningJobs {
|
|
|
|
if !j.status.IsFinished() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2024-09-29 18:07:15 +02:00
|
|
|
func (p *JobPool) Register() (err error) {
|
2024-09-29 20:53:33 +02:00
|
|
|
//if util.Config.Runner.RegistrationToken == "" {
|
|
|
|
// return fmt.Errorf("runner registration token required")
|
|
|
|
//}
|
2024-09-29 18:07:15 +02:00
|
|
|
|
|
|
|
if util.Config.Runner.TokenFile == "" {
|
|
|
|
return fmt.Errorf("runner token file required")
|
|
|
|
}
|
|
|
|
|
|
|
|
ok := p.tryRegisterRunner()
|
|
|
|
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("runner registration failed")
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-03-27 14:43:41 +01:00
|
|
|
func (p *JobPool) Unregister() (err error) {
|
|
|
|
|
2024-09-29 11:57:02 +02:00
|
|
|
if util.Config.Runner.Token == "" {
|
2024-03-27 14:43:41 +01:00
|
|
|
return fmt.Errorf("runner is not registered")
|
|
|
|
}
|
|
|
|
|
|
|
|
client := &http.Client{}
|
|
|
|
|
2024-09-29 20:53:33 +02:00
|
|
|
url := util.Config.WebHost + "/api/internal/runners"
|
2024-03-27 14:43:41 +01:00
|
|
|
|
|
|
|
req, err := http.NewRequest("DELETE", url, nil)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := client.Do(req)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if resp.StatusCode >= 400 && resp.StatusCode != 404 {
|
|
|
|
err = fmt.Errorf("encountered error while unregistering runner; server returned code %d", resp.StatusCode)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-09-29 11:57:02 +02:00
|
|
|
if util.Config.Runner.TokenFile != "" {
|
|
|
|
err = os.Remove(util.Config.Runner.TokenFile)
|
2024-03-27 14:43:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
// Run is the runner's main loop. It dequeues and executes jobs every 5
// seconds, and every second (in a separate goroutine, guarded by the
// p.processing atomic flag) sends progress to the server and polls it for
// new jobs. Panics if no runner token is configured. Never returns.
func (p *JobPool) Run() {

	if util.Config.Runner.Token == "" {
		panic("runner token required. Please register runner first or create it from web interface.")
	}

	queueTicker := time.NewTicker(5 * time.Second)

	requestTimer := time.NewTicker(1 * time.Second)

	p.runningJobs = make(map[int]*runningJob)

	defer func() {
		queueTicker.Stop()
		requestTimer.Stop()
	}()

	for {
		select {

		case <-queueTicker.C: // timer 5 seconds: get task from queue and run it

			if len(p.queue) == 0 {
				break
			}

			// Only the head of the queue is considered each tick (FIFO).
			t := p.queue[0]

			if t.status == task_logger.TaskFailStatus {
				//delete failed TaskRunner from queue
				p.queue = p.queue[1:]
				log.Info("Task " + strconv.Itoa(t.job.Task.ID) + " dequeued (failed)")
				break
			}

			// Track the job as running before starting it so progress
			// reporting and kill requests can find it by task ID.
			p.runningJobs[t.job.Task.ID] = &runningJob{
				job: t.job,
			}

			t.job.Logger = t.job.App.SetLogger(p.runningJobs[t.job.Task.ID])

			// Execute the job asynchronously; final status is derived from
			// the returned error unless the status was already finalized
			// externally (e.g. stopped via checkNewJobs).
			go func(runningJob *runningJob) {
				runningJob.SetStatus(task_logger.TaskRunningStatus)

				err := runningJob.job.Run(t.username, t.incomingVersion)

				if runningJob.status.IsFinished() {
					return
				}

				if err != nil {
					if runningJob.status == task_logger.TaskStoppingStatus {
						runningJob.SetStatus(task_logger.TaskStoppedStatus)
					} else {
						runningJob.SetStatus(task_logger.TaskFailStatus)
					}
				} else {
					runningJob.SetStatus(task_logger.TaskSuccessStatus)
				}

				log.Info("Task " + strconv.Itoa(runningJob.job.Task.ID) + " finished (" + string(runningJob.status) + ")")
			}(p.runningJobs[t.job.Task.ID])

			p.queue = p.queue[1:]
			log.Info("Task " + strconv.Itoa(t.job.Task.ID) + " dequeued")
			log.Info("Task " + strconv.Itoa(t.job.Task.ID) + " started")

		case <-requestTimer.C:

			go func() {

				// Skip this tick if a previous progress/poll cycle is
				// still in flight (CAS on the 0/1 processing flag).
				if !atomic.CompareAndSwapInt32(&p.processing, 0, 1) {
					return
				}

				defer atomic.StoreInt32(&p.processing, 0)

				p.sendProgress()

				// One-off mode: exit once at least one job ran and none
				// are still running.
				if util.Config.Runner.OneOff && len(p.runningJobs) > 0 && !p.hasRunningJobs() {
					os.Exit(0)
				}

				p.checkNewJobs()
			}()

		}
	}
}
|
|
|
|
|
|
|
|
func (p *JobPool) sendProgress() {
|
|
|
|
|
|
|
|
client := &http.Client{}
|
|
|
|
|
2024-09-29 21:58:21 +02:00
|
|
|
url := util.Config.WebHost + "/api/internal/runners"
|
2023-08-29 00:51:04 +02:00
|
|
|
|
|
|
|
body := RunnerProgress{
|
|
|
|
Jobs: nil,
|
|
|
|
}
|
|
|
|
|
|
|
|
for id, j := range p.runningJobs {
|
2023-09-20 03:01:28 +02:00
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
body.Jobs = append(body.Jobs, JobProgress{
|
|
|
|
ID: id,
|
|
|
|
LogRecords: j.logRecords,
|
|
|
|
Status: j.status,
|
|
|
|
})
|
|
|
|
|
2023-09-10 23:18:25 +02:00
|
|
|
j.logRecords = make([]LogRecord, 0)
|
2023-09-20 03:01:28 +02:00
|
|
|
|
|
|
|
if j.status.IsFinished() {
|
2023-10-01 16:38:05 +02:00
|
|
|
log.Info("Task " + strconv.Itoa(id) + " removed from running list")
|
2023-09-20 03:01:28 +02:00
|
|
|
delete(p.runningJobs, id)
|
|
|
|
}
|
2023-08-29 00:51:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
jsonBytes, err := json.Marshal(body)
|
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
if err != nil {
|
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "sending progress",
|
|
|
|
"action": "form request body",
|
|
|
|
"error": "can not marshal json",
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonBytes))
|
|
|
|
if err != nil {
|
2024-10-13 12:49:28 +02:00
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "sending progress",
|
|
|
|
"action": "create request",
|
|
|
|
"error": "can not create request to the server",
|
|
|
|
})
|
2023-08-29 00:51:04 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-09-29 21:58:21 +02:00
|
|
|
req.Header.Set("X-Runner-Token", util.Config.Runner.Token)
|
2024-01-07 20:50:37 +01:00
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
resp, err := client.Do(req)
|
|
|
|
if err != nil {
|
2024-10-13 12:49:28 +02:00
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "sending progress",
|
|
|
|
"action": "send request",
|
|
|
|
"error": "server returned an error",
|
|
|
|
"status": resp.StatusCode,
|
|
|
|
})
|
2023-08-29 00:51:04 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
defer resp.Body.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *JobPool) tryRegisterRunner() bool {
|
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
log.Info("Registering a new runner")
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
if util.Config.Runner.RegistrationToken == "" {
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
util.LogErrorWithFields(fmt.Errorf("registration token cannot be empty"), log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "form request",
|
|
|
|
"error": "can not retrieve registration token",
|
|
|
|
})
|
2024-09-29 20:53:33 +02:00
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
}
|
2023-08-29 00:51:04 +02:00
|
|
|
|
|
|
|
client := &http.Client{}
|
|
|
|
|
2024-09-29 20:53:33 +02:00
|
|
|
url := util.Config.WebHost + "/api/internal/runners"
|
2023-08-29 00:51:04 +02:00
|
|
|
|
|
|
|
jsonBytes, err := json.Marshal(RunnerRegistration{
|
2024-10-13 12:49:28 +02:00
|
|
|
RegistrationToken: util.Config.Runner.RegistrationToken,
|
2023-09-16 23:47:06 +02:00
|
|
|
Webhook: util.Config.Runner.Webhook,
|
2023-09-20 01:03:58 +02:00
|
|
|
MaxParallelTasks: util.Config.Runner.MaxParallelTasks,
|
2023-08-29 00:51:04 +02:00
|
|
|
})
|
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
if err != nil {
|
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "form request",
|
|
|
|
"error": "can not marshal json",
|
|
|
|
})
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBytes))
|
|
|
|
if err != nil {
|
2024-10-13 12:49:28 +02:00
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "create request",
|
|
|
|
"error": "can not create request to the server",
|
|
|
|
})
|
2023-08-29 00:51:04 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := client.Do(req)
|
|
|
|
if err != nil || resp.StatusCode != 200 {
|
2024-10-13 12:49:28 +02:00
|
|
|
|
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "execute request",
|
|
|
|
"error": "the server returned an error",
|
|
|
|
"status": resp.StatusCode,
|
|
|
|
})
|
2023-08-29 00:51:04 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2024-03-10 20:07:19 +01:00
|
|
|
body, err := io.ReadAll(resp.Body)
|
2023-08-29 00:51:04 +02:00
|
|
|
if err != nil {
|
2024-10-13 12:49:28 +02:00
|
|
|
|
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "read response body",
|
|
|
|
"error": "can not read server's response body",
|
|
|
|
})
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2024-09-29 11:57:02 +02:00
|
|
|
var res struct {
|
|
|
|
Token string `json:"token"`
|
2023-08-29 00:51:04 +02:00
|
|
|
}
|
|
|
|
|
2024-09-29 11:57:02 +02:00
|
|
|
err = json.Unmarshal(body, &res)
|
2023-08-29 00:51:04 +02:00
|
|
|
if err != nil {
|
2024-10-13 12:49:28 +02:00
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "parsing result json",
|
|
|
|
"error": "server's response has invalid format",
|
|
|
|
})
|
|
|
|
|
2024-09-29 11:57:02 +02:00
|
|
|
return false
|
2023-08-29 00:51:04 +02:00
|
|
|
}
|
|
|
|
|
2024-09-29 11:57:02 +02:00
|
|
|
err = os.WriteFile(util.Config.Runner.TokenFile, []byte(res.Token), 0644)
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2024-10-13 12:49:28 +02:00
|
|
|
if err != nil {
|
|
|
|
util.LogErrorWithFields(err, log.Fields{
|
|
|
|
"context": "registration",
|
|
|
|
"action": "storing token",
|
|
|
|
"error": "can not store token to the file",
|
|
|
|
})
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
defer resp.Body.Close()
|
|
|
|
|
|
|
|
return true
|
2023-08-27 18:02:51 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// checkNewJobs tries to find runner to queued jobs
|
|
|
|
func (p *JobPool) checkNewJobs() {
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2024-09-29 21:58:21 +02:00
|
|
|
if util.Config.Runner.Token == "" {
|
2024-09-29 11:57:02 +02:00
|
|
|
fmt.Println("Error creating request:", "no token provided")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
client := &http.Client{}
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2024-09-29 20:53:33 +02:00
|
|
|
url := util.Config.WebHost + "/api/internal/runners"
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
req, err := http.NewRequest("GET", url, nil)
|
2023-08-29 00:51:04 +02:00
|
|
|
|
2024-09-29 21:58:21 +02:00
|
|
|
req.Header.Set("X-Runner-Token", util.Config.Runner.Token)
|
2024-01-07 17:35:02 +01:00
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
if err != nil {
|
|
|
|
fmt.Println("Error creating request:", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := client.Do(req)
|
2024-01-07 14:36:48 +01:00
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
if err != nil {
|
|
|
|
fmt.Println("Error making request:", err)
|
|
|
|
return
|
|
|
|
}
|
2024-01-07 14:36:48 +01:00
|
|
|
|
2024-01-07 14:37:22 +01:00
|
|
|
if resp.StatusCode >= 400 {
|
2024-03-27 12:04:30 +01:00
|
|
|
log.Error("Encountered error while checking for new jobs; server returned code ", resp.StatusCode)
|
2024-01-07 14:36:48 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-08-27 18:02:51 +02:00
|
|
|
defer resp.Body.Close()
|
|
|
|
|
2024-03-10 20:07:19 +01:00
|
|
|
body, err := io.ReadAll(resp.Body)
|
2023-08-27 18:02:51 +02:00
|
|
|
if err != nil {
|
2024-03-27 12:04:30 +01:00
|
|
|
log.Error("Encountered error while checking for new jobs; unable to read response body:", err)
|
2023-08-27 18:02:51 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
var response RunnerState
|
2023-08-27 18:02:51 +02:00
|
|
|
err = json.Unmarshal(body, &response)
|
|
|
|
if err != nil {
|
2024-01-07 14:36:48 +01:00
|
|
|
log.Error("Checking new jobs, parsing JSON error:", err)
|
2023-08-27 18:02:51 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-09-10 23:18:25 +02:00
|
|
|
for _, currJob := range response.CurrentJobs {
|
|
|
|
runJob, exists := p.runningJobs[currJob.ID]
|
|
|
|
|
|
|
|
if !exists {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-04-12 12:32:54 +02:00
|
|
|
if runJob.status == task_logger.TaskStoppingStatus || runJob.status == task_logger.TaskStoppedStatus {
|
2023-09-10 23:18:25 +02:00
|
|
|
p.runningJobs[currJob.ID].job.Kill()
|
|
|
|
}
|
2024-03-12 03:03:43 +01:00
|
|
|
|
|
|
|
if runJob.status.IsFinished() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch runJob.status {
|
2024-04-12 12:32:54 +02:00
|
|
|
case task_logger.TaskRunningStatus:
|
|
|
|
if currJob.Status == task_logger.TaskStartingStatus || currJob.Status == task_logger.TaskWaitingStatus {
|
2024-03-12 03:03:43 +01:00
|
|
|
continue
|
|
|
|
}
|
2024-04-12 12:32:54 +02:00
|
|
|
case task_logger.TaskStoppingStatus:
|
2024-03-12 03:03:43 +01:00
|
|
|
if !currJob.Status.IsFinished() {
|
|
|
|
continue
|
|
|
|
}
|
2024-04-12 12:32:54 +02:00
|
|
|
case task_logger.TaskConfirmed:
|
|
|
|
if currJob.Status == task_logger.TaskWaitingConfirmation {
|
2024-03-12 03:03:43 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
runJob.SetStatus(currJob.Status)
|
2023-09-10 23:18:25 +02:00
|
|
|
}
|
|
|
|
|
2023-09-12 19:40:22 +02:00
|
|
|
if util.Config.Runner.OneOff {
|
|
|
|
if len(p.queue) > 0 || len(p.runningJobs) > 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
for _, newJob := range response.NewJobs {
|
|
|
|
if _, exists := p.runningJobs[newJob.Task.ID]; exists {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-09-20 01:03:58 +02:00
|
|
|
if p.existsInQueue(newJob.Task.ID) {
|
|
|
|
continue
|
2023-08-29 00:51:04 +02:00
|
|
|
}
|
|
|
|
|
2024-05-27 22:00:40 +02:00
|
|
|
newJob.Inventory.Repository = newJob.InventoryRepository
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
taskRunner := job{
|
|
|
|
username: newJob.Username,
|
|
|
|
incomingVersion: newJob.IncomingVersion,
|
2023-08-27 18:02:51 +02:00
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
job: &tasks.LocalJob{
|
|
|
|
Task: newJob.Task,
|
|
|
|
Template: newJob.Template,
|
|
|
|
Inventory: newJob.Inventory,
|
|
|
|
Repository: newJob.Repository,
|
|
|
|
Environment: newJob.Environment,
|
2024-07-17 17:29:12 +02:00
|
|
|
App: db_lib.CreateApp(
|
|
|
|
newJob.Template,
|
|
|
|
newJob.Repository,
|
|
|
|
newJob.Inventory,
|
|
|
|
nil),
|
2023-08-29 00:51:04 +02:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
taskRunner.job.Repository.SSHKey = response.AccessKeys[taskRunner.job.Repository.SSHKeyID]
|
|
|
|
|
|
|
|
if taskRunner.job.Inventory.SSHKeyID != nil {
|
|
|
|
taskRunner.job.Inventory.SSHKey = response.AccessKeys[*taskRunner.job.Inventory.SSHKeyID]
|
|
|
|
}
|
|
|
|
|
|
|
|
if taskRunner.job.Inventory.BecomeKeyID != nil {
|
|
|
|
taskRunner.job.Inventory.BecomeKey = response.AccessKeys[*taskRunner.job.Inventory.BecomeKeyID]
|
|
|
|
}
|
|
|
|
|
2024-10-04 19:47:17 +02:00
|
|
|
var vaults []db.TemplateVault
|
2024-10-03 21:41:36 +02:00
|
|
|
if taskRunner.job.Template.Vaults != nil {
|
|
|
|
for _, vault := range taskRunner.job.Template.Vaults {
|
2024-10-04 19:47:17 +02:00
|
|
|
vault := vault
|
|
|
|
key := response.AccessKeys[vault.VaultKeyID]
|
|
|
|
vault.Vault = &key
|
|
|
|
vaults = append(vaults, vault)
|
2024-10-03 21:41:36 +02:00
|
|
|
}
|
2023-09-11 02:00:10 +02:00
|
|
|
}
|
2024-10-04 19:47:17 +02:00
|
|
|
taskRunner.job.Template.Vaults = vaults
|
2023-09-11 02:00:10 +02:00
|
|
|
|
2024-05-27 22:00:40 +02:00
|
|
|
if taskRunner.job.Inventory.RepositoryID != nil {
|
|
|
|
taskRunner.job.Inventory.Repository.SSHKey = response.AccessKeys[taskRunner.job.Inventory.Repository.SSHKeyID]
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
p.queue = append(p.queue, &taskRunner)
|
2023-10-01 16:38:05 +02:00
|
|
|
log.Info("Task " + strconv.Itoa(taskRunner.job.Task.ID) + " enqueued")
|
2023-08-29 00:51:04 +02:00
|
|
|
}
|
2023-08-27 18:02:51 +02:00
|
|
|
}
|