2016-04-07 14:49:34 +02:00
|
|
|
package tasks
|
|
|
|
|
|
|
|
import (
	"crypto/md5"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"time"

	log "github.com/Sirupsen/logrus"

	"github.com/ansible-semaphore/semaphore/api/sockets"
	"github.com/ansible-semaphore/semaphore/db"
	"github.com/ansible-semaphore/semaphore/lib"
	"github.com/ansible-semaphore/semaphore/util"
)
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// TaskRunner executes a single task: it prepares the repository,
// inventory and key material, runs the playbook, and reports status
// and events back through the owning TaskPool's store and websockets.
type TaskRunner struct {
	task        db.Task        // task row being executed; Status/Start/End are mutated during the run
	template    db.Template    // template the task was created from
	inventory   db.Inventory   // inventory referenced by the template
	repository  db.Repository  // repository referenced by the template
	environment db.Environment // merged environment; JSON is rewritten in populateDetails

	users []int // IDs of project users that receive websocket status updates
	alert bool  // whether the project has alerts enabled

	alertChat *string // optional chat id/channel for alerts; nil when unset

	prepared bool        // set true once prepareRun completed successfully
	process  *os.Process // running playbook process, if any; used to stop the task
	pool     *TaskPool   // owning pool; provides the store and the resource locker
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func getMD5Hash(filepath string) (string, error) {
|
|
|
|
file, err := os.Open(filepath)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
defer file.Close()
|
|
|
|
|
|
|
|
hash := md5.New()
|
|
|
|
if _, err := io.Copy(hash, file); err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return fmt.Sprintf("%x", hash.Sum(nil)), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *TaskRunner) getRepoPath() string {
|
2022-01-30 12:22:18 +01:00
|
|
|
repo := lib.GitRepository{
|
|
|
|
Logger: t,
|
|
|
|
TemplateID: t.template.ID,
|
|
|
|
Repository: t.repository,
|
|
|
|
}
|
2021-08-31 14:03:52 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
return repo.GetFullPath()
|
2021-10-12 23:17:11 +02:00
|
|
|
}
|
|
|
|
|
2022-02-12 13:15:15 +01:00
|
|
|
// setStatus transitions the task to the given status, persists and
// broadcasts it, and fires the configured alerts.
//
// If the task is currently being stopped (TaskStoppingStatus), a
// requested failure is downgraded to "stopped" (the failure was caused
// by the stop itself) and "stopped" is accepted as-is; any other
// transition out of the stopping state is a programming error and
// panics.
func (t *TaskRunner) setStatus(status db.TaskStatus) {
	if t.task.Status == db.TaskStoppingStatus {
		switch status {
		case db.TaskFailStatus:
			// Failure during a stop is reported as a clean stop.
			status = db.TaskStoppedStatus
		case db.TaskStoppedStatus:
			// Expected terminal state; keep unchanged.
		default:
			panic("stopping TaskRunner cannot be " + status)
		}
	}

	t.task.Status = status

	// Persist the new status and notify project users over websockets.
	t.updateStatus()

	if status == db.TaskFailStatus {
		t.sendMailAlert()
	}

	// Telegram alerts fire on both terminal outcomes.
	if status == db.TaskSuccessStatus || status == db.TaskFailStatus {
		t.sendTelegramAlert()
	}
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) updateStatus() {
|
2021-08-25 17:37:19 +02:00
|
|
|
for _, user := range t.users {
|
|
|
|
b, err := json.Marshal(&map[string]interface{}{
|
2021-10-25 11:42:34 +02:00
|
|
|
"type": "update",
|
|
|
|
"start": t.task.Start,
|
|
|
|
"end": t.task.End,
|
|
|
|
"status": t.task.Status,
|
|
|
|
"task_id": t.task.ID,
|
|
|
|
"template_id": t.task.TemplateID,
|
2022-01-30 12:22:18 +01:00
|
|
|
"project_id": t.task.ProjectID,
|
2021-12-14 16:24:17 +01:00
|
|
|
"version": t.task.Version,
|
2021-08-25 17:37:19 +02:00
|
|
|
})
|
|
|
|
|
|
|
|
util.LogPanic(err)
|
|
|
|
|
|
|
|
sockets.Message(user, b)
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
if err := t.pool.store.UpdateTask(t.task); err != nil {
|
|
|
|
t.panicOnError(err, "Failed to update TaskRunner status")
|
2021-08-25 17:37:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// fail marks the task as failed; setStatus persists the state and
// triggers the configured alerts.
func (t *TaskRunner) fail() {
	t.setStatus(db.TaskFailStatus)
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// destroyKeys removes the temporary key material installed for this
// run: the inventory SSH key, the become (privilege-escalation) key
// and the Ansible vault password file. Failures are only logged, not
// fatal — the task itself has already finished by the time this runs.
func (t *TaskRunner) destroyKeys() {
	err := t.inventory.SSHKey.Destroy()
	if err != nil {
		t.Log("Can't destroy inventory user key, error: " + err.Error())
	}

	err = t.inventory.BecomeKey.Destroy()
	if err != nil {
		t.Log("Can't destroy inventory become user key, error: " + err.Error())
	}

	err = t.template.VaultKey.Destroy()
	if err != nil {
		t.Log("Can't destroy inventory vault password file, error: " + err.Error())
	}
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) createTaskEvent() {
|
2021-10-13 16:07:22 +02:00
|
|
|
objType := db.EventTask
|
2022-02-12 13:15:15 +01:00
|
|
|
desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " finished - " + strings.ToUpper(string(t.task.Status))
|
2021-08-30 21:42:11 +02:00
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
_, err := t.pool.store.CreateEvent(db.Event{
|
2021-08-30 21:42:11 +02:00
|
|
|
UserID: t.task.UserID,
|
2022-01-30 12:22:18 +01:00
|
|
|
ProjectID: &t.task.ProjectID,
|
2021-08-30 21:42:11 +02:00
|
|
|
ObjectType: &objType,
|
|
|
|
ObjectID: &t.task.ID,
|
|
|
|
Description: &desc,
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.panicOnError(err, "Fatal error inserting an event")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// prepareRun performs everything the task needs before the playbook can
// start: tmp directory, repository clone/checkout, inventory, galaxy
// requirements and the vault password file. On any failure it marks the
// task failed and returns; on success it sets t.prepared so run() may
// proceed. The deferred block always releases the pool's resource lock
// and records a task event, whether preparation succeeded or not.
func (t *TaskRunner) prepareRun() {
	t.prepared = false

	defer func() {
		log.Info("Stopped preparing TaskRunner " + strconv.Itoa(t.task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.task.ID))
		// Release the slot held while preparing so other tasks can start.
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}

		t.createTaskEvent()
	}()

	t.Log("Preparing: " + strconv.Itoa(t.task.ID))

	if err := checkTmpDir(util.Config.TmpPath); err != nil {
		t.Log("Creating tmp dir failed: " + err.Error())
		t.fail()
		return
	}

	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " is preparing"
	evt := db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	}

	if _, err := t.pool.store.CreateEvent(evt); err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}

	t.Log("Prepare TaskRunner with template: " + t.template.Name + "\n")

	t.updateStatus()

	if t.repository.GetType() == db.RepositoryLocal {
		// Local repositories are used in place; just verify the path exists.
		if _, err := os.Stat(t.repository.GitURL); err != nil {
			t.Log("Failed in finding static repository at " + t.repository.GitURL + ": " + err.Error())
			t.fail()
			return
		}
	} else {
		// Remote repositories are cloned/updated, then checked out to the
		// requested (or latest) commit.
		if err := t.updateRepository(); err != nil {
			t.Log("Failed updating repository: " + err.Error())
			t.fail()
			return
		}
		if err := t.checkoutRepository(); err != nil {
			t.Log("Failed to checkout repository to required commit: " + err.Error())
			t.fail()
			return
		}
	}

	if err := t.installInventory(); err != nil {
		t.Log("Failed to install inventory: " + err.Error())
		t.fail()
		return
	}

	// Install ansible-galaxy roles/collections required by the playbook.
	if err := t.installRequirements(); err != nil {
		t.Log("Running galaxy failed: " + err.Error())
		t.fail()
		return
	}

	if err := t.installVaultKeyFile(); err != nil {
		t.Log("Failed to install vault password file: " + err.Error())
		t.fail()
		return
	}

	t.prepared = true
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// run executes the prepared task: it runs the playbook, records the
// outcome, and — on success — enqueues any templates configured to
// autorun after this build. The deferred block always releases the
// pool's resource lock, stamps the end time, persists final status,
// records a task event and destroys temporary key material.
func (t *TaskRunner) run() {
	defer func() {
		log.Info("Stopped running TaskRunner " + strconv.Itoa(t.task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.task.ID))
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}

		now := time.Now()
		t.task.End = &now
		t.updateStatus()
		t.createTaskEvent()
		t.destroyKeys()
	}()

	// TODO: more details
	// Bail out early if a stop was requested while the task was queued.
	if t.task.Status == db.TaskStoppingStatus {
		t.setStatus(db.TaskStoppedStatus)
		return
	}

	now := time.Now()
	t.task.Start = &now
	t.setStatus(db.TaskRunningStatus)

	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " is running"

	_, err := t.pool.store.CreateEvent(db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	})

	if err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}

	t.Log("Started: " + strconv.Itoa(t.task.ID))
	t.Log("Run TaskRunner with template: " + t.template.Name + "\n")

	// TODO: ?????
	// Second stop check: a stop may have arrived between the event insert
	// above and actually launching the playbook.
	if t.task.Status == db.TaskStoppingStatus {
		t.setStatus(db.TaskStoppedStatus)
		return
	}

	err = t.runPlaybook()
	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		t.fail()
		return
	}

	t.setStatus(db.TaskSuccessStatus)

	// Enqueue templates configured to autorun after this build template.
	templates, err := t.pool.store.GetTemplates(t.task.ProjectID, db.TemplateFilter{
		BuildTemplateID: &t.task.TemplateID,
		AutorunOnly:     true,
	}, db.RetrieveQueryParams{})

	if err != nil {
		// NOTE(review): message says "Running playbook failed" but this is a
		// template-lookup failure; the task itself already succeeded.
		t.Log("Running playbook failed: " + err.Error())
		return
	}

	for _, tpl := range templates {
		_, err = t.pool.AddTask(db.Task{
			TemplateID:  tpl.ID,
			ProjectID:   tpl.ProjectID,
			BuildTaskID: &t.task.ID,
		}, nil, tpl.ProjectID)
		if err != nil {
			// A failed autorun enqueue does not abort the remaining templates.
			t.Log("Running playbook failed: " + err.Error())
			continue
		}
	}
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) prepareError(err error, errMsg string) error {
|
2021-08-30 18:04:18 +02:00
|
|
|
if err == db.ErrNotFound {
|
2022-01-30 12:22:18 +01:00
|
|
|
t.Log(errMsg)
|
2020-12-20 19:00:59 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.fail()
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-03-27 22:12:47 +02:00
|
|
|
//nolint: gocyclo
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) populateDetails() error {
|
2016-04-07 14:49:34 +02:00
|
|
|
// get template
|
2020-12-20 19:00:59 +01:00
|
|
|
var err error
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
t.template, err = t.pool.store.GetTemplate(t.task.ProjectID, t.task.TemplateID)
|
2020-12-20 19:00:59 +01:00
|
|
|
if err != nil {
|
|
|
|
return t.prepareError(err, "Template not found!")
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
|
|
|
|
2017-04-18 16:36:09 +02:00
|
|
|
// get project alert setting
|
2022-01-29 19:00:21 +01:00
|
|
|
project, err := t.pool.store.GetProject(t.template.ProjectID)
|
2020-12-20 19:00:59 +01:00
|
|
|
if err != nil {
|
|
|
|
return t.prepareError(err, "Project not found!")
|
2017-03-10 07:25:42 +01:00
|
|
|
}
|
2020-12-20 19:00:59 +01:00
|
|
|
|
2017-05-03 06:27:58 +02:00
|
|
|
t.alert = project.Alert
|
2018-03-27 22:12:47 +02:00
|
|
|
t.alertChat = project.AlertChat
|
2017-03-10 01:12:55 +01:00
|
|
|
|
2016-04-17 12:41:36 +02:00
|
|
|
// get project users
|
2022-01-29 19:00:21 +01:00
|
|
|
users, err := t.pool.store.GetProjectUsers(t.template.ProjectID, db.RetrieveQueryParams{})
|
2021-03-12 21:30:17 +01:00
|
|
|
if err != nil {
|
|
|
|
return t.prepareError(err, "Users not found!")
|
|
|
|
}
|
2016-04-17 12:41:36 +02:00
|
|
|
|
|
|
|
t.users = []int{}
|
|
|
|
for _, user := range users {
|
|
|
|
t.users = append(t.users, user.ID)
|
|
|
|
}
|
|
|
|
|
2016-04-07 14:49:34 +02:00
|
|
|
// get inventory
|
2022-01-29 19:00:21 +01:00
|
|
|
t.inventory, err = t.pool.store.GetInventory(t.template.ProjectID, t.template.InventoryID)
|
2020-12-20 19:00:59 +01:00
|
|
|
if err != nil {
|
|
|
|
return t.prepareError(err, "Template Inventory not found!")
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// get repository
|
2022-01-29 19:00:21 +01:00
|
|
|
t.repository, err = t.pool.store.GetRepository(t.template.ProjectID, t.template.RepositoryID)
|
2016-04-07 14:49:34 +02:00
|
|
|
|
2021-05-06 10:32:13 +02:00
|
|
|
if err != nil {
|
2016-04-07 14:49:34 +02:00
|
|
|
return err
|
|
|
|
}
|
2021-05-06 10:32:13 +02:00
|
|
|
|
2022-02-06 13:17:08 +01:00
|
|
|
err = t.repository.SSHKey.DeserializeSecret()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-04-07 14:49:34 +02:00
|
|
|
// get environment
|
2021-11-02 18:37:31 +01:00
|
|
|
if t.template.EnvironmentID != nil {
|
2022-01-29 19:00:21 +01:00
|
|
|
t.environment, err = t.pool.store.GetEnvironment(t.template.ProjectID, *t.template.EnvironmentID)
|
2016-04-07 14:49:34 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-11-02 18:37:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if t.task.Environment != "" {
|
|
|
|
environment := make(map[string]interface{})
|
|
|
|
if t.environment.JSON != "" {
|
|
|
|
err = json.Unmarshal([]byte(t.task.Environment), &environment)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
taskEnvironment := make(map[string]interface{})
|
|
|
|
err = json.Unmarshal([]byte(t.environment.JSON), &taskEnvironment)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for k, v := range taskEnvironment {
|
|
|
|
environment[k] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
var ev []byte
|
|
|
|
ev, err = json.Marshal(environment)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
t.environment.JSON = string(ev)
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// installVaultKeyFile writes the template's Ansible vault password to
// disk so ansible-playbook can read it. It is a no-op when the template
// has no vault key configured.
func (t *TaskRunner) installVaultKeyFile() error {
	if t.template.VaultKeyID == nil {
		return nil
	}

	return t.template.VaultKey.Install(db.AccessKeyRoleAnsiblePasswordVault)
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) checkoutRepository() error {
|
2022-03-30 17:31:00 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
repo := lib.GitRepository{
|
|
|
|
Logger: t,
|
|
|
|
TemplateID: t.template.ID,
|
|
|
|
Repository: t.repository,
|
2021-10-12 22:44:10 +02:00
|
|
|
}
|
2021-10-14 12:35:42 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
err := repo.ValidateRepo()
|
2021-10-12 22:44:10 +02:00
|
|
|
|
2022-01-30 12:29:33 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
if t.task.CommitHash != nil {
|
|
|
|
// checkout to commit if it is provided for TaskRunner
|
|
|
|
return repo.Checkout(*t.task.CommitHash)
|
2021-10-12 22:44:10 +02:00
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
// store commit to TaskRunner table
|
2021-10-14 12:35:42 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
commitHash, err := repo.GetLastCommitHash()
|
2021-10-14 12:35:42 +02:00
|
|
|
|
|
|
|
if err != nil {
|
2022-01-30 12:22:18 +01:00
|
|
|
return err
|
2021-10-14 12:35:42 +02:00
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
commitMessage, _ := repo.GetLastCommitMessage()
|
2016-04-08 21:41:20 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
t.task.CommitHash = &commitHash
|
|
|
|
t.task.CommitMessage = commitMessage
|
2016-04-07 14:49:34 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
return t.pool.store.UpdateTask(t.task)
|
2022-01-20 15:53:48 +01:00
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
func (t *TaskRunner) updateRepository() error {
|
|
|
|
repo := lib.GitRepository{
|
|
|
|
Logger: t,
|
|
|
|
TemplateID: t.template.ID,
|
|
|
|
Repository: t.repository,
|
2016-11-22 02:07:00 +01:00
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
err := repo.ValidateRepo()
|
2021-10-12 23:17:11 +02:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
if !os.IsNotExist(err) {
|
2022-01-30 12:22:18 +01:00
|
|
|
err = os.RemoveAll(repo.GetFullPath())
|
2022-01-20 15:53:48 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-10-12 23:17:11 +02:00
|
|
|
}
|
2022-01-30 12:22:18 +01:00
|
|
|
return repo.Clone()
|
2022-01-20 15:53:48 +01:00
|
|
|
}
|
2021-10-12 23:17:11 +02:00
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
if repo.CanBePulled() {
|
|
|
|
err = repo.Pull()
|
2022-01-20 15:53:48 +01:00
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
err = os.RemoveAll(repo.GetFullPath())
|
2022-01-20 15:53:48 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-01-30 12:22:18 +01:00
|
|
|
return repo.Clone()
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
func (t *TaskRunner) installCollectionsRequirements() error {
|
|
|
|
requirementsFilePath := fmt.Sprintf("%s/collections/requirements.yml", t.getRepoPath())
|
|
|
|
requirementsHashFilePath := fmt.Sprintf("%s.md5", requirementsFilePath)
|
2021-04-15 18:40:16 +02:00
|
|
|
|
2021-04-15 18:07:49 +02:00
|
|
|
if _, err := os.Stat(requirementsFilePath); err != nil {
|
2022-03-17 17:18:56 +01:00
|
|
|
t.Log("No collections/requirements.yml file found. Skip galaxy install process.\n")
|
2021-04-15 18:07:49 +02:00
|
|
|
return nil
|
|
|
|
}
|
2022-03-30 17:31:00 +02:00
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
if hasRequirementsChanges(requirementsFilePath, requirementsHashFilePath) {
|
|
|
|
if err := t.runGalaxy([]string{
|
2022-03-17 17:22:40 +01:00
|
|
|
"collection",
|
2022-03-17 17:18:56 +01:00
|
|
|
"install",
|
|
|
|
"-r",
|
|
|
|
requirementsFilePath,
|
|
|
|
"--force",
|
|
|
|
}); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := writeMD5Hash(requirementsFilePath, requirementsHashFilePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
t.Log("collections/requirements.yml has no changes. Skip galaxy install process.\n")
|
|
|
|
}
|
2022-03-30 17:31:00 +02:00
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
return nil
|
|
|
|
}
|
2021-04-15 18:07:49 +02:00
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
func (t *TaskRunner) installRolesRequirements() error {
|
|
|
|
requirementsFilePath := fmt.Sprintf("%s/roles/requirements.yml", t.getRepoPath())
|
|
|
|
requirementsHashFilePath := fmt.Sprintf("%s.md5", requirementsFilePath)
|
2022-03-30 17:31:00 +02:00
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
if _, err := os.Stat(requirementsFilePath); err != nil {
|
|
|
|
t.Log("No roles/requirements.yml file found. Skip galaxy install process.\n")
|
|
|
|
return nil
|
|
|
|
}
|
2022-03-30 17:31:00 +02:00
|
|
|
|
2021-04-15 18:40:16 +02:00
|
|
|
if hasRequirementsChanges(requirementsFilePath, requirementsHashFilePath) {
|
|
|
|
if err := t.runGalaxy([]string{
|
2022-03-17 20:28:14 +01:00
|
|
|
"role",
|
2021-04-15 18:40:16 +02:00
|
|
|
"install",
|
|
|
|
"-r",
|
2022-03-17 17:18:56 +01:00
|
|
|
requirementsFilePath,
|
2021-04-15 18:40:16 +02:00
|
|
|
"--force",
|
|
|
|
}); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := writeMD5Hash(requirementsFilePath, requirementsHashFilePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
2022-01-30 12:22:18 +01:00
|
|
|
t.Log("roles/requirements.yml has no changes. Skip galaxy install process.\n")
|
2021-04-15 18:06:22 +02:00
|
|
|
}
|
2022-03-30 17:31:00 +02:00
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
return nil
|
|
|
|
}
|
2021-04-15 18:06:22 +02:00
|
|
|
|
2022-03-17 17:18:56 +01:00
|
|
|
func (t *TaskRunner) installRequirements() error {
|
|
|
|
if err := t.installCollectionsRequirements(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := t.installRolesRequirements(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-04-15 18:06:22 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) runGalaxy(args []string) error {
|
2022-01-30 14:04:09 +01:00
|
|
|
return lib.AnsiblePlaybook{
|
|
|
|
Logger: t,
|
|
|
|
TemplateID: t.template.ID,
|
|
|
|
Repository: t.repository,
|
|
|
|
}.RunGalaxy(args)
|
2016-06-30 17:34:35 +02:00
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) runPlaybook() (err error) {
|
Allow concurrency for tasks that does not collide
Two different concurrency modes are implemented, and is enabled by
setting "concurrency_mode" in the config file to either "project" or "node".
When "project" concurrency is enabled, tasks will run in parallel if and
only if they do not share the same project id, with no regard to the
nodes/hosts that are affected.
When "node" concurrency is enabled, a task will run in parallel if and
only if the hosts affected by tasks already running does not intersect
with the hosts that would be affected by the task in question.
If "concurrency_mode" is not specified, no task will start before the
previous one has finished.
The collision check is based on the output from the "--list-hosts"
argument to ansible, which uses the hosts specified in the inventory.
Thus, if two different hostnames are used that points to the same node,
such as "127.0.0.1" and "localhost", there will be no collision and two
tasks may connect to the same node concurrently. If this behaviour is
not desired, one should make sure to not include aliases for their hosts
in their inventories when enabling concurrency mode.
To restrict the amount of parallel tasks that runs at the same time, one
can add the "max_parallel_tasks" to the config file. This defaults to a
humble 10 if not specified.
2017-05-29 17:27:56 +02:00
|
|
|
args, err := t.getPlaybookArgs()
|
|
|
|
if err != nil {
|
2021-08-25 17:37:19 +02:00
|
|
|
return
|
Allow concurrency for tasks that does not collide
Two different concurrency modes are implemented, and is enabled by
setting "concurrency_mode" in the config file to either "project" or "node".
When "project" concurrency is enabled, tasks will run in parallel if and
only if they do not share the same project id, with no regard to the
nodes/hosts that are affected.
When "node" concurrency is enabled, a task will run in parallel if and
only if the hosts affected by tasks already running does not intersect
with the hosts that would be affected by the task in question.
If "concurrency_mode" is not specified, no task will start before the
previous one has finished.
The collision check is based on the output from the "--list-hosts"
argument to ansible, which uses the hosts specified in the inventory.
Thus, if two different hostnames are used that points to the same node,
such as "127.0.0.1" and "localhost", there will be no collision and two
tasks may connect to the same node concurrently. If this behaviour is
not desired, one should make sure to not include aliases for their hosts
in their inventories when enabling concurrency mode.
To restrict the amount of parallel tasks that runs at the same time, one
can add the "max_parallel_tasks" to the config file. This defaults to a
humble 10 if not specified.
2017-05-29 17:27:56 +02:00
|
|
|
}
|
|
|
|
|
2022-02-06 16:35:58 +01:00
|
|
|
return lib.AnsiblePlaybook{
|
2022-01-30 14:04:09 +01:00
|
|
|
Logger: t,
|
|
|
|
TemplateID: t.template.ID,
|
|
|
|
Repository: t.repository,
|
2022-02-06 16:35:58 +01:00
|
|
|
}.RunPlaybook(args, func(p *os.Process) { t.process = p })
|
Allow concurrency for tasks that does not collide
Two different concurrency modes are implemented, and is enabled by
setting "concurrency_mode" in the config file to either "project" or "node".
When "project" concurrency is enabled, tasks will run in parallel if and
only if they do not share the same project id, with no regard to the
nodes/hosts that are affected.
When "node" concurrency is enabled, a task will run in parallel if and
only if the hosts affected by tasks already running does not intersect
with the hosts that would be affected by the task in question.
If "concurrency_mode" is not specified, no task will start before the
previous one has finished.
The collision check is based on the output from the "--list-hosts"
argument to ansible, which uses the hosts specified in the inventory.
Thus, if two different hostnames are used that points to the same node,
such as "127.0.0.1" and "localhost", there will be no collision and two
tasks may connect to the same node concurrently. If this behaviour is
not desired, one should make sure to not include aliases for their hosts
in their inventories when enabling concurrency mode.
To restrict the amount of parallel tasks that runs at the same time, one
can add the "max_parallel_tasks" to the config file. This defaults to a
humble 10 if not specified.
2017-05-29 17:27:56 +02:00
|
|
|
}
|
|
|
|
|
2022-02-06 16:35:58 +01:00
|
|
|
func (t *TaskRunner) getEnvironmentExtraVars() (str string, err error) {
|
2021-09-01 23:14:32 +02:00
|
|
|
extraVars := make(map[string]interface{})
|
|
|
|
|
|
|
|
if t.environment.JSON != "" {
|
2021-09-12 00:18:26 +02:00
|
|
|
err = json.Unmarshal([]byte(t.environment.JSON), &extraVars)
|
2021-09-01 23:14:32 +02:00
|
|
|
if err != nil {
|
2021-09-12 00:18:26 +02:00
|
|
|
return
|
2021-09-01 23:14:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-19 22:49:33 +01:00
|
|
|
taskDetails := make(map[string]interface{})
|
2021-11-02 18:57:02 +01:00
|
|
|
|
2022-01-19 22:49:33 +01:00
|
|
|
if t.task.Message != "" {
|
|
|
|
taskDetails["message"] = t.task.Message
|
|
|
|
}
|
2021-11-02 18:57:02 +01:00
|
|
|
|
2022-01-19 22:49:33 +01:00
|
|
|
if t.task.UserID != nil {
|
|
|
|
var user db.User
|
2022-01-29 19:00:21 +01:00
|
|
|
user, err = t.pool.store.GetUser(*t.task.UserID)
|
2022-01-19 23:08:34 +01:00
|
|
|
if err == nil {
|
|
|
|
taskDetails["username"] = user.Username
|
2021-11-02 18:57:02 +01:00
|
|
|
}
|
2022-01-19 22:49:33 +01:00
|
|
|
}
|
2021-11-02 18:57:02 +01:00
|
|
|
|
2022-01-19 22:49:33 +01:00
|
|
|
if t.template.Type != db.TemplateTask {
|
|
|
|
taskDetails["type"] = t.template.Type
|
2022-01-29 19:00:21 +01:00
|
|
|
incomingVersion := t.task.GetIncomingVersion(t.pool.store)
|
2022-01-19 22:49:33 +01:00
|
|
|
if incomingVersion != nil {
|
|
|
|
taskDetails["incoming_version"] = incomingVersion
|
|
|
|
}
|
|
|
|
if t.template.Type == db.TemplateBuild {
|
|
|
|
taskDetails["target_version"] = t.task.Version
|
2021-11-02 08:16:20 +01:00
|
|
|
}
|
2021-10-14 21:14:21 +02:00
|
|
|
}
|
|
|
|
|
2022-01-19 22:49:33 +01:00
|
|
|
vars := make(map[string]interface{})
|
|
|
|
vars["task_details"] = taskDetails
|
|
|
|
extraVars["semaphore_vars"] = vars
|
|
|
|
|
2021-09-01 23:14:32 +02:00
|
|
|
ev, err := json.Marshal(extraVars)
|
|
|
|
if err != nil {
|
2021-09-12 00:18:26 +02:00
|
|
|
return
|
2021-09-01 23:14:32 +02:00
|
|
|
}
|
|
|
|
|
2021-09-12 00:18:26 +02:00
|
|
|
str = string(ev)
|
|
|
|
|
|
|
|
return
|
2021-09-01 23:14:32 +02:00
|
|
|
}
|
|
|
|
|
2018-03-27 22:12:47 +02:00
|
|
|
//nolint: gocyclo
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) getPlaybookArgs() (args []string, err error) {
|
2016-04-08 21:41:20 +02:00
|
|
|
playbookName := t.task.Playbook
|
2021-09-13 15:22:08 +02:00
|
|
|
if playbookName == "" {
|
2016-04-08 21:41:20 +02:00
|
|
|
playbookName = t.template.Playbook
|
|
|
|
}
|
2016-04-07 14:49:34 +02:00
|
|
|
|
2018-03-16 02:26:25 +01:00
|
|
|
var inventory string
|
2017-10-26 09:00:48 +02:00
|
|
|
switch t.inventory.Type {
|
2021-09-12 00:18:26 +02:00
|
|
|
case db.InventoryFile:
|
2017-10-26 09:00:48 +02:00
|
|
|
inventory = t.inventory.Inventory
|
2022-05-24 17:54:33 +02:00
|
|
|
case db.InventoryStatic, db.InventoryStaticYaml:
|
2017-10-26 09:00:48 +02:00
|
|
|
inventory = util.Config.TmpPath + "/inventory_" + strconv.Itoa(t.task.ID)
|
2022-05-24 17:54:33 +02:00
|
|
|
if t.inventory.Type == db.InventoryStaticYaml {
|
|
|
|
inventory += ".yml"
|
|
|
|
}
|
2022-02-05 18:34:34 +01:00
|
|
|
default:
|
|
|
|
err = fmt.Errorf("invalid invetory type")
|
|
|
|
return
|
2017-10-26 09:00:48 +02:00
|
|
|
}
|
|
|
|
|
2021-09-12 00:18:26 +02:00
|
|
|
args = []string{
|
2017-10-26 09:00:48 +02:00
|
|
|
"-i", inventory,
|
2016-04-08 21:41:20 +02:00
|
|
|
}
|
2016-04-07 14:49:34 +02:00
|
|
|
|
2021-09-12 00:18:26 +02:00
|
|
|
if t.inventory.SSHKeyID != nil {
|
|
|
|
switch t.inventory.SSHKey.Type {
|
|
|
|
case db.AccessKeySSH:
|
|
|
|
args = append(args, "--private-key="+t.inventory.SSHKey.GetPath())
|
2022-02-05 19:42:09 +01:00
|
|
|
//args = append(args, "--extra-vars={\"ansible_ssh_private_key_file\": \""+t.inventory.SSHKey.GetPath()+"\"}")
|
|
|
|
if t.inventory.SSHKey.SshKey.Login != "" {
|
|
|
|
args = append(args, "--extra-vars={\"ansible_user\": \""+t.inventory.SSHKey.SshKey.Login+"\"}")
|
|
|
|
}
|
2021-09-12 00:18:26 +02:00
|
|
|
case db.AccessKeyLoginPassword:
|
|
|
|
args = append(args, "--extra-vars=@"+t.inventory.SSHKey.GetPath())
|
|
|
|
case db.AccessKeyNone:
|
|
|
|
default:
|
2022-02-05 18:34:34 +01:00
|
|
|
err = fmt.Errorf("access key does not suite for inventory's user credentials")
|
2021-09-12 00:18:26 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-13 15:22:08 +02:00
|
|
|
if t.inventory.BecomeKeyID != nil {
|
|
|
|
switch t.inventory.BecomeKey.Type {
|
2021-09-12 00:18:26 +02:00
|
|
|
case db.AccessKeyLoginPassword:
|
2021-09-13 15:22:08 +02:00
|
|
|
args = append(args, "--extra-vars=@"+t.inventory.BecomeKey.GetPath())
|
2021-09-12 00:18:26 +02:00
|
|
|
case db.AccessKeyNone:
|
|
|
|
default:
|
2022-02-05 18:34:34 +01:00
|
|
|
err = fmt.Errorf("access key does not suite for inventory's sudo user credentials")
|
2021-09-12 00:18:26 +02:00
|
|
|
return
|
|
|
|
}
|
2016-04-08 21:41:20 +02:00
|
|
|
}
|
2016-04-07 14:49:34 +02:00
|
|
|
|
2016-04-08 21:41:20 +02:00
|
|
|
if t.task.Debug {
|
|
|
|
args = append(args, "-vvvv")
|
|
|
|
}
|
2016-04-07 14:49:34 +02:00
|
|
|
|
2016-06-30 16:57:45 +02:00
|
|
|
if t.task.DryRun {
|
|
|
|
args = append(args, "--check")
|
|
|
|
}
|
|
|
|
|
2021-09-16 23:20:59 +02:00
|
|
|
if t.template.VaultKeyID != nil {
|
|
|
|
args = append(args, "--vault-password-file", t.template.VaultKey.GetPath())
|
2021-09-01 23:14:32 +02:00
|
|
|
}
|
2018-02-14 15:54:04 +01:00
|
|
|
|
2022-02-06 16:35:58 +01:00
|
|
|
extraVars, err := t.getEnvironmentExtraVars()
|
2021-09-01 23:14:32 +02:00
|
|
|
if err != nil {
|
2022-01-30 12:22:18 +01:00
|
|
|
t.Log(err.Error())
|
|
|
|
t.Log("Could not remove command environment, if existant it will be passed to --extra-vars. This is not fatal but be aware of side effects")
|
2021-09-01 23:14:32 +02:00
|
|
|
} else if extraVars != "" {
|
|
|
|
args = append(args, "--extra-vars", extraVars)
|
2016-04-08 21:41:20 +02:00
|
|
|
}
|
|
|
|
|
2018-09-11 13:49:03 +02:00
|
|
|
var templateExtraArgs []string
|
2016-04-08 21:41:20 +02:00
|
|
|
if t.template.Arguments != nil {
|
2021-09-12 00:18:26 +02:00
|
|
|
err = json.Unmarshal([]byte(*t.template.Arguments), &templateExtraArgs)
|
2018-09-11 13:49:03 +02:00
|
|
|
if err != nil {
|
2022-01-30 12:22:18 +01:00
|
|
|
t.Log("Invalid format of the template extra arguments, must be valid JSON")
|
2021-09-12 00:18:26 +02:00
|
|
|
return
|
2016-04-08 21:41:20 +02:00
|
|
|
}
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
|
|
|
|
2022-01-26 20:51:20 +01:00
|
|
|
var taskExtraArgs []string
|
|
|
|
if t.template.AllowOverrideArgsInTask && t.task.Arguments != nil {
|
|
|
|
err = json.Unmarshal([]byte(*t.task.Arguments), &taskExtraArgs)
|
|
|
|
if err != nil {
|
2022-01-30 12:22:18 +01:00
|
|
|
t.Log("Invalid format of the TaskRunner extra arguments, must be valid JSON")
|
2022-01-26 20:51:20 +01:00
|
|
|
return
|
|
|
|
}
|
2016-04-08 21:41:20 +02:00
|
|
|
}
|
2021-09-12 00:18:26 +02:00
|
|
|
|
2022-01-26 20:51:20 +01:00
|
|
|
args = append(args, templateExtraArgs...)
|
|
|
|
args = append(args, taskExtraArgs...)
|
|
|
|
args = append(args, playbookName)
|
|
|
|
|
2021-09-12 00:18:26 +02:00
|
|
|
return
|
2016-04-07 14:49:34 +02:00
|
|
|
}
|
2017-02-08 13:26:15 +01:00
|
|
|
|
2021-04-15 18:39:19 +02:00
|
|
|
func hasRequirementsChanges(requirementsFilePath string, requirementsHashFilePath string) bool {
|
|
|
|
oldFileMD5HashBytes, err := ioutil.ReadFile(requirementsHashFilePath)
|
|
|
|
if err != nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
newFileMD5Hash, err := getMD5Hash(requirementsFilePath)
|
2021-04-15 18:39:19 +02:00
|
|
|
if err != nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return string(oldFileMD5HashBytes) != newFileMD5Hash
|
|
|
|
}
|
|
|
|
|
2021-04-15 18:39:36 +02:00
|
|
|
func writeMD5Hash(requirementsFile string, requirementsHashFile string) error {
|
2022-01-29 19:00:21 +01:00
|
|
|
newFileMD5Hash, err := getMD5Hash(requirementsFile)
|
2021-04-15 18:39:36 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ioutil.WriteFile(requirementsHashFile, []byte(newFileMD5Hash), 0644)
|
|
|
|
}
|
|
|
|
|
2018-02-15 21:29:03 +01:00
|
|
|
// checkTmpDir checks to see if the temporary directory exists
|
|
|
|
// and if it does not attempts to create it
|
|
|
|
func checkTmpDir(path string) error {
|
2018-03-27 22:12:47 +02:00
|
|
|
var err error
|
|
|
|
if _, err = os.Stat(path); err != nil {
|
2018-02-15 21:29:03 +01:00
|
|
|
if os.IsNotExist(err) {
|
2018-02-28 10:02:54 +01:00
|
|
|
return os.MkdirAll(path, 0700)
|
2018-02-15 21:29:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
2021-05-06 10:32:13 +02:00
|
|
|
}
|