Stop the containers if the process terminates abruptly
continuous-integration/drone/push Build is passing Details

pull/3/head
Clément FRÉVILLE 2 years ago
parent 4ec0b7f710
commit c1cca106b7

@ -4,7 +4,7 @@ namespace sk {
// Build a host at `ip` that will accept at most `connectionsMax` connections;
// it starts with zero active connections and an empty runner queue.
host::host(const std::string &ip, unsigned int connectionsMax)
    : ip{ip},
      connections{0},
      connectionsMax{connectionsMax},
      runners{} {}
// Register a runner with this host and count the new connection.
// The queue stores non-owning pointers, so the caller must keep the
// runner alive for as long as it is queued here.
void host::addConnection(sk::runner &runner) {
    // Fix: the stale by-value push (`runners.push(runner)`) left over from the
    // diff was removed — the queue holds `sk::runner *`, so only the
    // address-of push is valid. Pushing twice would also double-register.
    runners.push(&runner);
    connections += 1;
}

@ -11,7 +11,7 @@ class host {
unsigned int connections;
unsigned int connectionsMax;
std::queue<sk::runner> runners;
std::queue<sk::runner *> runners;
public:
host(const std::string &ip, unsigned int connectionsMax);

@ -1,6 +1,7 @@
#include "network.hpp"
#include "program.hpp"
#include "runner.hpp"
#include <csignal>
#include <filesystem>
#include <iostream>
#include <spawn.h>
@ -28,6 +29,8 @@ sk::runner_backend detect_backend() {
return sk::runner_backend::BubbleWrap;
}
sk::runner *global_runner = nullptr;
int main(int argc, char **argv) {
int opt;
std::optional<sk::runner_backend> selected_backend;
@ -46,6 +49,15 @@ int main(int argc, char **argv) {
}
sk::runner runner(selected_backend.has_value() ? selected_backend.value() : detect_backend());
global_runner = &runner;
struct sigaction action {};
action.sa_handler = [](int) {
if (global_runner) {
global_runner->exit_active_jobs();
}
};
sigaction(SIGINT, &action, nullptr);
sigaction(SIGTERM, &action, nullptr);
if (optind < argc) {
std::ifstream t(argv[optind]);

@ -1,5 +1,6 @@
#include "runner.hpp"
#include <algorithm>
#include <array>
#include <cerrno>
#include <fcntl.h>
#include <sys/wait.h>
@ -75,6 +76,12 @@ run_result runner::run_blocking(const program &program) {
close(out_pipe[1]);
close(err_pipe[1]);
// Register the job as active
{
std::lock_guard<std::mutex> guard(active_jobs_mutex);
active_jobs.push_back(active_job{program.name, pid});
}
size_t len = program.code.size();
size_t window = 0;
const char *data = program.code.data();
@ -128,12 +135,11 @@ run_result runner::run_blocking(const program &program) {
if (pfds[i].fd == out_pipe[0]) {
out.append(buffer.data(), bytes_read);
} else if (pfds[i].fd == timerfd) {
if (backend == runner_backend::Docker) {
const char *const kill_args[] = {"docker", "kill", program.name.c_str(), nullptr};
pid_t kill_pid;
ensure(posix_spawnp(&kill_pid, kill_args[0], nullptr, nullptr, const_cast<char *const *>(kill_args), nullptr));
} else {
ensure(kill(pid, SIGINT));
std::lock_guard<std::mutex> guard(active_jobs_mutex);
auto it = std::find_if(active_jobs.begin(), active_jobs.end(), [pid](const active_job &job) { return job.pid == pid; });
if (it != active_jobs.end()) {
exit(*it);
active_jobs.erase(it);
}
killed = true;
} else {
@ -154,7 +160,34 @@ run_result runner::run_blocking(const program &program) {
close(err_pipe[0]);
close(timerfd);
// Remove the job from the active list
{
std::lock_guard<std::mutex> guard(active_jobs_mutex);
auto it = std::find_if(active_jobs.begin(), active_jobs.end(), [pid](const active_job &job) { return job.pid == pid; });
if (it != active_jobs.end()) {
active_jobs.erase(it);
}
}
posix_spawn_file_actions_destroy(&actions);
return run_result{out, err, killed ? 124 : exit_code};
}
// Terminate every job that is still registered as running, then forget them.
//
// NOTE(review): main() installs this (via global_runner) as the SIGINT/SIGTERM
// handler; locking a std::mutex inside a signal handler is not
// async-signal-safe and can deadlock if the signal interrupts a thread that
// already holds active_jobs_mutex — confirm this trade-off is acceptable.
void runner::exit_active_jobs() {
    const std::lock_guard<std::mutex> guard(active_jobs_mutex);
    for (auto it = active_jobs.begin(); it != active_jobs.end(); ++it) {
        exit(*it);
    }
    active_jobs.clear();
}
// Forcefully stop a single job, using the mechanism matching the backend:
// Docker jobs are stopped with `docker kill <job_id>`, BubbleWrap jobs are
// sent SIGINT directly via their PID.
void runner::exit(const active_job &job) {
    if (backend == runner_backend::Docker) {
        const char *const kill_args[] = {"docker", "kill", job.job_id.c_str(), nullptr};
        pid_t kill_pid;
        ensure(posix_spawnp(&kill_pid, kill_args[0], nullptr, nullptr, const_cast<char *const *>(kill_args), nullptr));
        // Fix: reap the spawned `docker kill` helper — it was previously never
        // waited on, leaving a zombie process behind on every kill. waitpid is
        // async-signal-safe, so this is also fine from the signal-handler path.
        waitpid(kill_pid, nullptr, 0);
    } else {
        ensure(kill(job.pid, SIGINT));
    }
}
}

@ -1,7 +1,9 @@
#pragma once
#include "program.hpp"
#include <mutex>
#include <string>
#include <vector>
namespace sk {
struct [[nodiscard]] run_result {
@ -10,13 +12,24 @@ struct [[nodiscard]] run_result {
int exit_code;
};
// A sandboxed program that is currently executing.
struct [[nodiscard]] active_job {
    // Name the job was launched under (passed to `docker kill` for the
    // Docker backend).
    std::string job_id;
    // PID of the spawned sandbox process (signaled directly for BubbleWrap).
    pid_t pid;
};
// Sandboxing technology used to isolate executed programs.
enum class runner_backend { BubbleWrap, Docker };
// Executes programs inside a sandbox (BubbleWrap or Docker) and tracks the
// jobs currently running so they can be terminated on demand.
class runner {
    runner_backend backend;
    // Jobs currently executing; guarded by active_jobs_mutex.
    std::vector<active_job> active_jobs;
    std::mutex active_jobs_mutex;
public:
    explicit runner(runner_backend backend);
    // Run `program` to completion, capturing its output and exit status.
    run_result run_blocking(const program &program);
    // Terminate every tracked job — wired to SIGINT/SIGTERM in main().
    void exit_active_jobs();
private:
    // Stop a single job using the mechanism appropriate for the backend.
    void exit(const active_job &job);
};
}

Loading…
Cancel
Save