2016-04-07 14:49:34 +02:00
|
|
|
package tasks
|
|
|
|
|
|
|
|
import (
|
2016-04-08 21:41:20 +02:00
|
|
|
"encoding/json"
|
2016-04-07 14:49:34 +02:00
|
|
|
"os"
|
|
|
|
"strconv"
|
2016-11-22 02:01:57 +01:00
|
|
|
"strings"
|
2016-05-17 21:12:54 +02:00
|
|
|
"time"
|
2021-12-14 16:24:17 +01:00
|
|
|
|
|
|
|
"github.com/ansible-semaphore/semaphore/api/sockets"
|
|
|
|
"github.com/ansible-semaphore/semaphore/db"
|
2024-04-12 12:32:54 +02:00
|
|
|
"github.com/ansible-semaphore/semaphore/pkg/task_logger"
|
2021-12-14 16:24:17 +01:00
|
|
|
"github.com/ansible-semaphore/semaphore/util"
|
2024-03-12 01:44:04 +01:00
|
|
|
log "github.com/sirupsen/logrus"
|
2016-04-07 14:49:34 +02:00
|
|
|
)
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
// Job is the unit of work a TaskRunner executes. Per the note on
// TaskRunner.job, implementations execute Ansible and return stdout to
// the Semaphore logs.
type Job interface {
	// Run executes the job. username is the name of the user who started
	// the task (may be empty when the user cannot be resolved);
	// incomingVersion carries a version passed in from a related task for
	// non-task template types (see TaskRunner.run).
	Run(username string, incomingVersion *string) error
	// Kill aborts the running job.
	Kill()
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// TaskRunner holds everything needed to execute one queued task: the
// task row itself, the resolved template/inventory/repository/environment,
// and runtime bookkeeping (recipients of status updates, alert settings,
// the owning pool, and the job being executed).
type TaskRunner struct {
	Task        db.Task
	Template    db.Template
	Inventory   db.Inventory
	Repository  db.Repository
	Environment db.Environment

	// users lists the user IDs that receive websocket status updates for
	// this task (project members plus all admins — see populateDetails).
	users []int
	// alert mirrors the project's Alert setting.
	alert bool
	// alertChat mirrors the project's AlertChat setting (optional).
	alertChat *string
	// pool is the owning TaskPool, used for store access, the resource
	// locker, and queueing follow-up build tasks.
	pool *TaskPool

	// job executes Ansible and returns stdout to Semaphore logs
	job Job

	RunnerID int
	Username string
	// IncomingVersion is the version handed in from a related task, if any.
	IncomingVersion *string
}
|
|
|
|
|
2023-08-30 11:13:48 +02:00
|
|
|
func (t *TaskRunner) saveStatus() {
|
2021-08-25 17:37:19 +02:00
|
|
|
for _, user := range t.users {
|
|
|
|
b, err := json.Marshal(&map[string]interface{}{
|
2021-10-25 11:42:34 +02:00
|
|
|
"type": "update",
|
2023-08-29 00:51:04 +02:00
|
|
|
"start": t.Task.Start,
|
|
|
|
"end": t.Task.End,
|
|
|
|
"status": t.Task.Status,
|
|
|
|
"task_id": t.Task.ID,
|
|
|
|
"template_id": t.Task.TemplateID,
|
|
|
|
"project_id": t.Task.ProjectID,
|
|
|
|
"version": t.Task.Version,
|
2021-08-25 17:37:19 +02:00
|
|
|
})
|
|
|
|
|
|
|
|
util.LogPanic(err)
|
|
|
|
|
|
|
|
sockets.Message(user, b)
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
if err := t.pool.store.UpdateTask(t.Task); err != nil {
|
2022-01-29 19:00:21 +01:00
|
|
|
t.panicOnError(err, "Failed to update TaskRunner status")
|
2021-08-25 17:37:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-29 00:51:04 +02:00
|
|
|
// kill forwards the kill request to the underlying job.
func (t *TaskRunner) kill() {
	t.job.Kill()
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) createTaskEvent() {
|
2021-10-13 16:07:22 +02:00
|
|
|
objType := db.EventTask
|
2023-08-29 00:51:04 +02:00
|
|
|
desc := "Task ID " + strconv.Itoa(t.Task.ID) + " (" + t.Template.Name + ")" + " finished - " + strings.ToUpper(string(t.Task.Status))
|
2021-08-30 21:42:11 +02:00
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
_, err := t.pool.store.CreateEvent(db.Event{
|
2023-08-29 00:51:04 +02:00
|
|
|
UserID: t.Task.UserID,
|
|
|
|
ProjectID: &t.Task.ProjectID,
|
2021-08-30 21:42:11 +02:00
|
|
|
ObjectType: &objType,
|
2023-08-29 00:51:04 +02:00
|
|
|
ObjectID: &t.Task.ID,
|
2021-08-30 21:42:11 +02:00
|
|
|
Description: &desc,
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.panicOnError(err, "Fatal error inserting an event")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
// run executes the task's full lifecycle: connect to the store if
// needed, handle early stop requests, record a "running" event, run the
// job, set the final status, and queue any autorun build templates.
// Cleanup (releasing the pool's resource lock, stamping the end time,
// saving status and writing the "finished" event) happens in a defer so
// it runs on every exit path, including panics.
func (t *TaskRunner) run() {
	// Open a dedicated store connection for non-permanent stores and
	// close it when the run finishes.
	if !t.pool.store.PermanentConnection() {
		t.pool.store.Connect("run task " + strconv.Itoa(t.Task.ID))
		defer t.pool.store.Close("run task " + strconv.Itoa(t.Task.ID))
	}

	defer func() {
		log.Info("Stopped running TaskRunner " + strconv.Itoa(t.Task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.Task.ID))
		// Release this runner's slot in the pool's resource locker.
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}

		// Stamp the end time, persist the final state, and log the
		// "finished" event regardless of how run() exited.
		now := time.Now()
		t.Task.End = &now
		t.saveStatus()
		t.createTaskEvent()
	}()

	// Mark task as stopped if user stopped task during preparation (before task run).
	if t.Task.Status == task_logger.TaskStoppingStatus {
		t.SetStatus(task_logger.TaskStoppedStatus)
		return
	}

	t.SetStatus(task_logger.TaskStartingStatus)

	// Record a "task is running" event before launching the job.
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.Task.ID) + " (" + t.Template.Name + ")" + " is running"

	_, err := t.pool.store.CreateEvent(db.Event{
		UserID:      t.Task.UserID,
		ProjectID:   &t.Task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.Task.ID,
		Description: &desc,
	})

	if err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}

	t.Log("Started: " + strconv.Itoa(t.Task.ID))
	t.Log("Run TaskRunner with template: " + t.Template.Name + "\n")

	var username string
	var incomingVersion *string

	// Resolve the starting user's name; a lookup failure leaves
	// username empty rather than aborting the run.
	if t.Task.UserID != nil {
		var user db.User
		user, err = t.pool.store.GetUser(*t.Task.UserID)
		if err == nil {
			username = user.Username
		}
	}

	// Non-task templates (e.g. deploy/build pipelines) receive the
	// version produced by the related task.
	if t.Template.Type != db.TemplateTask {
		incomingVersion = t.Task.GetIncomingVersion(t.pool.store)
	}

	err = t.job.Run(username, incomingVersion)

	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		t.SetStatus(task_logger.TaskFailStatus)
		return
	}

	// Only promote to success if nothing else (e.g. a stop request)
	// changed the status while the job ran.
	if t.Task.Status == task_logger.TaskRunningStatus {
		t.SetStatus(task_logger.TaskSuccessStatus)
	}

	// Queue follow-up tasks: templates configured to autorun after a
	// build of this template.
	templates, err := t.pool.store.GetTemplates(t.Task.ProjectID, db.TemplateFilter{
		BuildTemplateID: &t.Task.TemplateID,
		AutorunOnly:     true,
	}, db.RetrieveQueryParams{})
	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		return
	}

	for _, tpl := range templates {
		_, err = t.pool.AddTask(db.Task{
			TemplateID:  tpl.ID,
			ProjectID:   tpl.ProjectID,
			BuildTaskID: &t.Task.ID,
		}, nil, tpl.ProjectID)
		if err != nil {
			// A single failed follow-up does not stop the others.
			t.Log("Running playbook failed: " + err.Error())
			continue
		}
	}
}
|
|
|
|
|
2022-01-29 19:00:21 +01:00
|
|
|
func (t *TaskRunner) prepareError(err error, errMsg string) error {
|
2021-08-30 18:04:18 +02:00
|
|
|
if err == db.ErrNotFound {
|
2022-01-30 12:22:18 +01:00
|
|
|
t.Log(errMsg)
|
2020-12-20 19:00:59 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2024-04-12 12:32:54 +02:00
|
|
|
t.SetStatus(task_logger.TaskFailStatus)
|
2020-12-20 19:00:59 +01:00
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-10-29 12:00:56 +02:00
|
|
|
// nolint: gocyclo
|
2022-01-29 19:00:21 +01:00
|
|
|
// populateDetails loads everything the run needs from the store —
// template, project alert settings, notification recipients, inventory,
// repository (with its SSH key secret), and environment — and merges
// the task-supplied environment JSON with the template environment.
// Lookup failures are routed through prepareError where a friendly
// message exists, otherwise returned directly.
func (t *TaskRunner) populateDetails() error {
	// get template
	var err error

	t.Template, err = t.pool.store.GetTemplate(t.Task.ProjectID, t.Task.TemplateID)
	if err != nil {
		return t.prepareError(err, "Template not found!")
	}

	// get project alert setting
	project, err := t.pool.store.GetProject(t.Template.ProjectID)
	if err != nil {
		return t.prepareError(err, "Project not found!")
	}

	t.alert = project.Alert
	t.alertChat = project.AlertChat

	// get project users
	projectUsers, err := t.pool.store.GetProjectUsers(t.Template.ProjectID, db.RetrieveQueryParams{})
	if err != nil {
		return t.prepareError(err, "Users not found!")
	}

	// Collect the distinct set of update recipients: project members
	// plus every admin (the map de-duplicates users who are both).
	users := make(map[int]bool)

	for _, user := range projectUsers {
		users[user.ID] = true
	}

	admins, err := t.pool.store.GetAllAdmins()
	if err != nil {
		return err
	}

	for _, admin := range admins {
		users[admin.ID] = true
	}

	t.users = []int{}
	for userID := range users {
		t.users = append(t.users, userID)
	}

	// get inventory
	if t.Template.InventoryID != nil {
		t.Inventory, err = t.pool.store.GetInventory(t.Template.ProjectID, *t.Template.InventoryID)
		if err != nil {
			return t.prepareError(err, "Template Inventory not found!")
		}
	}

	// get repository
	t.Repository, err = t.pool.store.GetRepository(t.Template.ProjectID, t.Template.RepositoryID)

	if err != nil {
		return err
	}

	err = t.Repository.SSHKey.DeserializeSecret()
	if err != nil {
		return err
	}

	// get environment
	if t.Template.EnvironmentID != nil {
		t.Environment, err = t.pool.store.GetEnvironment(t.Template.ProjectID, *t.Template.EnvironmentID)
		if err != nil {
			return err
		}
	}

	// Merge environments: start from the task's own environment JSON,
	// then let template environment keys override it, and store the
	// merged result back into t.Environment.JSON.
	if t.Task.Environment != "" {
		environment := make(map[string]interface{})
		if t.Environment.JSON != "" {
			err = json.Unmarshal([]byte(t.Task.Environment), &environment)
			if err != nil {
				return err
			}
		}

		// NOTE(review): the guard above checks t.Environment.JSON but
		// parses t.Task.Environment, and the unmarshal below parses
		// t.Environment.JSON unconditionally — when the template has no
		// environment JSON this fails on empty input. Looks asymmetric;
		// confirm intent upstream.
		taskEnvironment := make(map[string]interface{})
		err = json.Unmarshal([]byte(t.Environment.JSON), &taskEnvironment)
		if err != nil {
			return err
		}

		for k, v := range taskEnvironment {
			environment[k] = v
		}

		var ev []byte
		ev, err = json.Marshal(environment)
		if err != nil {
			return err
		}

		t.Environment.JSON = string(ev)
	}

	return nil
}
|
|
|
|
|
2018-02-15 21:29:03 +01:00
|
|
|
// checkTmpDir checks to see if the temporary directory exists
|
|
|
|
// and if it does not attempts to create it
|
|
|
|
func checkTmpDir(path string) error {
|
2018-03-27 22:12:47 +02:00
|
|
|
var err error
|
|
|
|
if _, err = os.Stat(path); err != nil {
|
2018-02-15 21:29:03 +01:00
|
|
|
if os.IsNotExist(err) {
|
2018-02-28 10:02:54 +01:00
|
|
|
return os.MkdirAll(path, 0700)
|
2018-02-15 21:29:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
2021-05-06 10:32:13 +02:00
|
|
|
}
|