2020-01-17 21:05:03 +08:00
|
|
|
// Copyright 2019 Google LLC
|
2019-03-19 00:21:48 +08:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
2022-01-28 17:38:27 +08:00
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0
|
2019-03-19 00:21:48 +08:00
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
// Implementation file for the sandbox2::Monitor class.
|
|
|
|
|
|
|
|
#include "sandboxed_api/sandbox2/monitor.h"
|
|
|
|
|
2019-05-08 09:29:51 +08:00
|
|
|
// clang-format off
|
2019-03-19 00:21:48 +08:00
|
|
|
#include <linux/posix_types.h> // NOLINT: Needs to come before linux/ipc.h
|
|
|
|
#include <linux/ipc.h>
|
2019-05-08 09:29:51 +08:00
|
|
|
// clang-format on
|
2019-03-19 00:21:48 +08:00
|
|
|
#include <sched.h>
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <sys/ptrace.h>
|
|
|
|
#include <sys/time.h>
|
|
|
|
#include <sys/wait.h>
|
|
|
|
#include <syscall.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
#include <atomic>
|
|
|
|
#include <cerrno>
|
|
|
|
#include <csignal>
|
2021-05-10 22:03:15 +08:00
|
|
|
#include <cstdio>
|
2019-03-19 00:21:48 +08:00
|
|
|
#include <cstdlib>
|
|
|
|
#include <cstring>
|
|
|
|
#include <ctime>
|
2022-08-09 23:27:28 +08:00
|
|
|
#include <deque>
|
2019-03-19 00:21:48 +08:00
|
|
|
#include <fstream>
|
|
|
|
#include <memory>
|
|
|
|
#include <set>
|
|
|
|
#include <sstream>
|
|
|
|
#include <string>
|
2022-08-09 23:27:28 +08:00
|
|
|
#include <utility>
|
2019-03-19 00:21:48 +08:00
|
|
|
|
|
|
|
#include <glog/logging.h>
|
2021-05-10 22:03:15 +08:00
|
|
|
#include "absl/cleanup/cleanup.h"
|
2021-09-10 18:34:21 +08:00
|
|
|
#include "absl/container/flat_hash_set.h"
|
2019-06-05 15:25:50 +08:00
|
|
|
#include "sandboxed_api/util/flag.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "absl/memory/memory.h"
|
2021-05-10 22:03:15 +08:00
|
|
|
#include "absl/status/status.h"
|
|
|
|
#include "absl/strings/match.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "absl/strings/str_cat.h"
|
|
|
|
#include "absl/strings/str_format.h"
|
|
|
|
#include "absl/time/time.h"
|
2021-01-14 01:25:25 +08:00
|
|
|
#include "sandboxed_api/config.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "sandboxed_api/sandbox2/client.h"
|
|
|
|
#include "sandboxed_api/sandbox2/comms.h"
|
|
|
|
#include "sandboxed_api/sandbox2/executor.h"
|
|
|
|
#include "sandboxed_api/sandbox2/limits.h"
|
|
|
|
#include "sandboxed_api/sandbox2/mounts.h"
|
|
|
|
#include "sandboxed_api/sandbox2/namespace.h"
|
2020-02-20 23:45:22 +08:00
|
|
|
#include "sandboxed_api/sandbox2/network_proxy/server.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "sandboxed_api/sandbox2/policy.h"
|
|
|
|
#include "sandboxed_api/sandbox2/regs.h"
|
|
|
|
#include "sandboxed_api/sandbox2/result.h"
|
|
|
|
#include "sandboxed_api/sandbox2/sanitizer.h"
|
2019-07-09 16:31:48 +08:00
|
|
|
#include "sandboxed_api/sandbox2/stack_trace.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "sandboxed_api/sandbox2/syscall.h"
|
|
|
|
#include "sandboxed_api/sandbox2/util.h"
|
2021-05-10 22:03:15 +08:00
|
|
|
#include "sandboxed_api/util/file_helpers.h"
|
2021-06-28 17:02:35 +08:00
|
|
|
#include "sandboxed_api/util/status_macros.h"
|
2021-07-27 23:31:38 +08:00
|
|
|
#include "sandboxed_api/util/strerror.h"
|
2021-05-10 22:03:15 +08:00
|
|
|
#include "sandboxed_api/util/temp_file.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2020-02-27 16:03:29 +08:00
|
|
|
using std::string;
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
ABSL_FLAG(bool, sandbox2_report_on_sandboxee_signal, true,
|
|
|
|
"Report sandbox2 sandboxee deaths caused by signals");
|
|
|
|
|
|
|
|
ABSL_FLAG(bool, sandbox2_report_on_sandboxee_timeout, true,
|
|
|
|
"Report sandbox2 sandboxee timeouts");
|
|
|
|
|
|
|
|
ABSL_DECLARE_FLAG(bool, sandbox2_danger_danger_permit_all);
|
2020-01-06 18:33:25 +08:00
|
|
|
ABSL_DECLARE_FLAG(bool, sandbox_libunwind_crash_handler);
|
2019-06-05 15:25:50 +08:00
|
|
|
ABSL_DECLARE_FLAG(string, sandbox2_danger_danger_permit_all_and_log);
|
2019-03-19 00:21:48 +08:00
|
|
|
|
|
|
|
namespace sandbox2 {
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2022-08-09 23:27:28 +08:00
|
|
|
// Since waitpid() is biased towards newer threads, we run the risk of starving
// older threads if the newer ones raise a lot of events.
// To avoid it, we use this class to gather all the waiting threads and then
// return them one at a time on each call to Wait().
// In this way, everyone gets their chance.
class PidWaiter {
 public:
  // Constructs a PidWaiter where the given priority_pid is checked first.
  explicit PidWaiter(pid_t priority_pid) : priority_pid_(priority_pid) {}

  // Returns the PID of a thread that needs attention, populating 'status' with
  // the status returned by the waitpid() call. It returns 0 if no threads
  // require attention at the moment, or -1 if there was an error, in which case
  // the error value can be found in 'errno'.
  int Wait(int* status) {
    // Only hit the kernel again once the local queue is drained and no error
    // from the previous refill is still waiting to be reported.
    if (statuses_.empty() && last_errno_ == 0) {
      RefillStatuses();
    }

    if (statuses_.empty()) {
      if (last_errno_ == 0) return 0;
      // Surface the deferred waitpid() failure exactly once, mimicking the
      // errno contract of a direct waitpid() call.
      errno = last_errno_;
      last_errno_ = 0;
      return -1;
    }

    // Hand out queued events FIFO so older threads are not starved.
    const auto& entry = statuses_.front();
    pid_t pid = entry.first;
    *status = entry.second;
    statuses_.pop_front();
    return pid;
  }

 private:
  // Drains every currently-pending wait event into 'statuses_'. The priority
  // PID is polled first, then any child (-1). Never blocks (WNOHANG).
  void RefillStatuses() {
    statuses_.clear();
    last_errno_ = 0;
    pid_t pid = priority_pid_;
    int status;
    while (true) {
      // It should be a non-blocking operation (hence WNOHANG), so this function
      // returns quickly if there are no events to be processed.
      pid_t ret =
          waitpid(pid, &status, __WNOTHREAD | __WALL | WUNTRACED | WNOHANG);
      if (ret > 0) {
        statuses_.emplace_back(ret, status);
      } else if (ret < 0) {
        // Remember the failure; it is reported by a later Wait() call.
        last_errno_ = errno;
        break;
      } else if (pid == -1) {
        // ret == 0 while already waiting on any child: nothing pending.
        break;
      }
      // After the priority PID had its chance, wait on any child.
      pid = -1;
    }
  }

  pid_t priority_pid_;  // Thread whose events are drained first.
  std::deque<std::pair<pid_t, int>> statuses_ = {};  // Queued (pid, status).
  int last_errno_ = 0;  // Deferred errno from a failed waitpid().
};
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// We could use the ProcMapsIterator, however we want the full file content.
|
|
|
|
std::string ReadProcMaps(pid_t pid) {
|
|
|
|
std::ifstream input(absl::StrCat("/proc/", pid, "/maps"),
|
|
|
|
std::ios_base::in | std::ios_base::binary);
|
|
|
|
std::ostringstream contents;
|
|
|
|
contents << input.rdbuf();
|
|
|
|
return contents.str();
|
|
|
|
}
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
void ContinueProcess(pid_t pid, int signo) {
|
|
|
|
if (ptrace(PTRACE_CONT, pid, 0, signo) == -1) {
|
2019-05-21 22:30:34 +08:00
|
|
|
if (errno == ESRCH) {
|
|
|
|
LOG(WARNING) << "Process " << pid
|
|
|
|
<< " died while trying to PTRACE_CONT it";
|
|
|
|
} else {
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_CONT, pid=" << pid << ", sig=" << signo
|
|
|
|
<< ")";
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void StopProcess(pid_t pid, int signo) {
|
|
|
|
if (ptrace(PTRACE_LISTEN, pid, 0, signo) == -1) {
|
2019-05-21 22:30:34 +08:00
|
|
|
if (errno == ESRCH) {
|
|
|
|
LOG(WARNING) << "Process " << pid
|
|
|
|
<< " died while trying to PTRACE_LISTEN it";
|
|
|
|
} else {
|
2021-07-12 16:56:06 +08:00
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_LISTEN, pid=" << pid << ", sig=" << signo
|
2019-05-21 22:30:34 +08:00
|
|
|
<< ")";
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-13 22:48:44 +08:00
|
|
|
void CompleteSyscall(pid_t pid, int signo) {
|
|
|
|
if (ptrace(PTRACE_SYSCALL, pid, 0, signo) == -1) {
|
|
|
|
if (errno == ESRCH) {
|
|
|
|
LOG(WARNING) << "Process " << pid
|
|
|
|
<< " died while trying to PTRACE_SYSCALL it";
|
|
|
|
} else {
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_SYSCALL, pid=" << pid << ", sig=" << signo
|
|
|
|
<< ")";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-10 22:03:15 +08:00
|
|
|
// If the Tomoyo LSM is active on this kernel, maps an empty placeholder file
// over /dev/fd/<kSandbox2TargetExecFD> inside the sandbox mounts so Tomoyo's
// path resolution of the comms FD does not fail. On success, the path of the
// created temp file is stored in 'comms_fd_dev' so the Monitor destructor can
// delete it.
void MaybeEnableTomoyoLsmWorkaround(Mounts& mounts, std::string& comms_fd_dev) {
  // Probe the active LSM list only once per process.
  static auto tomoyo_active = []() -> bool {
    std::string lsm_list;
    if (auto status = sapi::file::GetContents(
            "/sys/kernel/security/lsm", &lsm_list, sapi::file::Defaults());
        !status.ok() && !absl::IsNotFound(status)) {
      // NOTE(review): errno here is whatever the last libc call left behind
      // and may be unrelated to this failure — the status message is the
      // authoritative error; confirm before relying on the StrError() output.
      VLOG(1) << "Checking active LSMs failed: " << status.message() << ": "
              << sapi::StrError(errno);
      return false;
    }
    return absl::StrContains(lsm_list, "tomoyo");
  }();

  if (!tomoyo_active) {
    return;
  }
  VLOG(1) << "Tomoyo LSM active, enabling workaround";

  if (mounts.ResolvePath("/dev").ok() || mounts.ResolvePath("/dev/fd").ok()) {
    // Avoid shadowing /dev/fd/1022 below if /dev or /dev/fd is already mapped.
    VLOG(1) << "Parent dir already mapped, skipping";
    return;
  }

  auto temp_file = sapi::CreateNamedTempFileAndClose("/tmp/");
  if (!temp_file.ok()) {
    LOG(WARNING) << "Failed to create empty temp file: " << temp_file.status();
    return;
  }
  comms_fd_dev = std::move(*temp_file);

  // Ignore errors here, as the file itself might already be mapped.
  if (auto status = mounts.AddFileAt(
          comms_fd_dev, absl::StrCat("/dev/fd/", Comms::kSandbox2TargetExecFD),
          false);
      !status.ok()) {
    // Fixed: removed a stray printf-style "%s" left in this streamed message;
    // it was printed literally instead of being substituted.
    VLOG(1) << "Mapping comms FD: " << status.message();
  }
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Constructs a Monitor for one sandboxee run. 'executor', 'policy' and
// 'notify' are borrowed, not owned; they must outlive this object.
Monitor::Monitor(Executor* executor, Policy* policy, Notify* notify)
    : executor_(executor),
      notify_(notify),
      policy_(policy),
      // NOLINTNEXTLINE clang-diagnostic-deprecated-declarations
      comms_(executor_->ipc()->comms()),
      ipc_(executor_->ipc()),
      wait_for_execve_(executor->enable_sandboxing_pre_execve_) {
  // It's a pre-connected Comms channel, no need to accept new connection.
  CHECK(comms_->IsConnected());
  std::string path =
      absl::GetFlag(FLAGS_sandbox2_danger_danger_permit_all_and_log);
  // Prime both request flags to the "set" state so that MainLoop's
  // test_and_set() returns true (no pending request) until someone clears
  // them to signal a kill/dump-stack request.
  external_kill_request_flag_.test_and_set(std::memory_order_relaxed);
  dump_stack_request_flag_.test_and_set(std::memory_order_relaxed);
  // Only open the permit-all audit log if the flag names a file.
  if (!path.empty()) {
    log_file_ = std::fopen(path.c_str(), "a+");
    PCHECK(log_file_ != nullptr) << "Failed to open log file '" << path << "'";
  }

  if (auto* ns = policy_->GetNamespace(); ns) {
    // Check for the Tomoyo LSM, which is active by default in several common
    // distribution kernels (esp. Debian).
    MaybeEnableTomoyoLsmWorkaround(ns->mounts(), comms_fd_dev_);
  }
}
|
|
|
|
|
|
|
|
// Releases the resources this Monitor acquired during its lifetime.
Monitor::~Monitor() {
  // Delete the temp file created for the Tomoyo comms-FD workaround, if any.
  if (!comms_fd_dev_.empty()) {
    std::remove(comms_fd_dev_.c_str());
  }
  // The audit log is only open when the permit-all-and-log flag was set.
  if (log_file_ != nullptr) {
    std::fclose(log_file_);
  }
  // The proxy thread only exists when a network proxy server was created.
  if (network_proxy_server_) {
    network_proxy_thread_.join();
  }
}
|
|
|
|
|
2019-05-08 09:29:51 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
void LogContainer(const std::vector<std::string>& container) {
|
|
|
|
for (size_t i = 0; i < container.size(); ++i) {
|
2021-12-03 23:17:02 +08:00
|
|
|
LOG(INFO) << "[" << std::setfill('0') << std::setw(4) << i
|
|
|
|
<< "]=" << container[i];
|
2019-05-08 09:29:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// Monitor thread entry point: starts the sandboxee, performs the whole
// initialization handshake, then enters MainLoop() until the sandboxee
// terminates. Every early return records a SETUP_ERROR with a specific
// failure reason.
void Monitor::Run() {
  // Always wake the parent thread waiting on setup, even on early error
  // returns (cleanups run in reverse order of declaration).
  absl::Cleanup setup_notify = [this] { setup_notification_.Notify(); };

  // On any exit path: record monitor rusage, fire the final notification,
  // release IPC fds and signal completion.
  absl::Cleanup monitor_cleanup = [this] {
    getrusage(RUSAGE_THREAD, result_.GetRUsageMonitor());
    notify_->EventFinished(result_);
    ipc_->InternalCleanupFdMap();
    done_notification_.Notify();
  };

  // Arm the wall-time deadline, if one was configured.
  if (executor_->limits()->wall_time_limit() != absl::ZeroDuration()) {
    auto deadline = absl::Now() + executor_->limits()->wall_time_limit();
    deadline_millis_.store(absl::ToUnixMillis(deadline),
                           std::memory_order_relaxed);
  }

  // It'd be costly to initialize the sigset_t for each sigtimedwait()
  // invocation, so do it once per Monitor.
  sigset_t sigtimedwait_sset;
  if (!InitSetupSignals(&sigtimedwait_sset)) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_SIGNALS);
    return;
  }

  Namespace* ns = policy_->GetNamespace();
  // Debug-only dump of the mount tree (guarded by verbosity to avoid the
  // cost of listing mounts when nobody will see it).
  if (VLOG_IS_ON(1) && ns != nullptr) {
    std::vector<std::string> outside_entries;
    std::vector<std::string> inside_entries;
    ns->mounts().RecursivelyListMounts(
        /*outside_entries=*/&outside_entries,
        /*inside_entries=*/&inside_entries);
    VLOG(1) << "Outside entries mapped to chroot:";
    LogContainer(outside_entries);
    VLOG(1) << "Inside entries as they appear in chroot:";
    LogContainer(inside_entries);
  }

  // Don't trace the child: it will allow to use 'strace -f' with the whole
  // sandbox master/monitor, which ptrace_attach'es to the child.
  int clone_flags = CLONE_UNTRACED;

  if (policy_->allowed_hosts_) {
    EnableNetworkProxyServer();
  }

  // Get PID of the sandboxee.
  bool should_have_init = ns && (ns->GetCloneFlags() & CLONE_NEWPID);
  absl::StatusOr<Executor::Process> process =
      executor_->StartSubProcess(clone_flags, ns, policy_->capabilities());

  if (!process.ok()) {
    LOG(ERROR) << "Starting sandboxed subprocess failed: " << process.status();
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_SUBPROCESS);
    return;
  }

  pid_ = process->main_pid;

  // Seize the PID-namespace init process too, so PTRACE_O_EXITKILL tears the
  // whole namespace down if the monitor dies.
  if (process->init_pid > 0) {
    if (ptrace(PTRACE_SEIZE, process->init_pid, 0, PTRACE_O_EXITKILL) != 0) {
      if (errno == ESRCH) {
        SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_PTRACE);
        return;
      }
      PLOG(FATAL) << "attaching to init process failed";
    }
  }

  if (pid_ <= 0 || (should_have_init && process->init_pid <= 0)) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_SUBPROCESS);
    return;
  }

  // Initialization handshake with the sandboxee, in fixed order; each step
  // maps to a distinct failure reason.
  if (!notify_->EventStarted(pid_, comms_)) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_NOTIFY);
    return;
  }
  if (!InitSendIPC()) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_IPC);
    return;
  }
  if (!InitSendCwd()) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_CWD);
    return;
  }
  if (!InitSendPolicy()) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_POLICY);
    return;
  }
  if (!WaitForSandboxReady()) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_WAIT);
    return;
  }
  if (!InitApplyLimits()) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_LIMITS);
    return;
  }
  // This call should be the last in the init sequence, because it can cause the
  // sandboxee to enter ptrace-stopped state, in which it will not be able to
  // send any messages over the Comms channel.
  if (!InitPtraceAttach()) {
    SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_PTRACE);
    return;
  }

  // Tell the parent thread (Sandbox2 object) that we're done with the initial
  // set-up process of the sandboxee.
  std::move(setup_notify).Invoke();

  MainLoop(&sigtimedwait_sset);
}
|
|
|
|
|
|
|
|
bool Monitor::IsActivelyMonitoring() {
|
|
|
|
// If we're still waiting for execve(), then we allow all syscalls.
|
|
|
|
return !wait_for_execve_;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Marks the initial execve() as observed; from now on the policy is enforced.
void Monitor::SetActivelyMonitoring() {
  wait_for_execve_ = false;
}
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
// Records the final sandbox outcome exactly once.
//
// The CHECK enforces the single-assignment invariant: every caller must make
// sure no other code path has already decided the outcome (UNSET means no
// result was recorded yet).
void Monitor::SetExitStatusCode(Result::StatusEnum final_status,
                                uintptr_t reason_code) {
  CHECK(result_.final_status() == Result::UNSET);
  result_.SetExitStatusCode(final_status, reason_code);
}
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
bool Monitor::ShouldCollectStackTrace() {
|
|
|
|
// Only get the stacktrace if we are not in the libunwind sandbox (avoid
|
|
|
|
// recursion).
|
|
|
|
bool stacktrace_collection_possible =
|
2020-01-06 18:33:25 +08:00
|
|
|
(policy_->GetNamespace() ||
|
|
|
|
absl::GetFlag(FLAGS_sandbox_libunwind_crash_handler) == false) &&
|
|
|
|
executor_->libunwind_sbox_for_pid_ == 0;
|
2019-05-15 22:46:26 +08:00
|
|
|
if (!stacktrace_collection_possible) {
|
|
|
|
LOG(ERROR) << "Cannot collect stack trace. Unwind pid "
|
|
|
|
<< executor_->libunwind_sbox_for_pid_ << ", namespace "
|
|
|
|
<< policy_->GetNamespace();
|
|
|
|
return false;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
switch (result_.final_status()) {
|
|
|
|
case Result::EXTERNAL_KILL:
|
|
|
|
return policy_->collect_stacktrace_on_kill_;
|
|
|
|
case Result::TIMEOUT:
|
|
|
|
return policy_->collect_stacktrace_on_timeout_;
|
|
|
|
case Result::SIGNALED:
|
|
|
|
return policy_->collect_stacktrace_on_signal_;
|
|
|
|
case Result::VIOLATION:
|
|
|
|
return policy_->collect_stacktrace_on_violation_;
|
2021-08-16 18:12:39 +08:00
|
|
|
case Result::OK:
|
|
|
|
return policy_->collect_stacktrace_on_exit_;
|
2019-05-15 22:46:26 +08:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
// Attaches post-mortem diagnostics to 'result_': registers, program name,
// /proc maps, and (when enabled by policy) a symbolized stack trace.
// Takes ownership of 'regs'.
void Monitor::SetAdditionalResultInfo(std::unique_ptr<Regs> regs) {
  pid_t pid = regs->pid();
  result_.SetRegs(std::move(regs));
  result_.SetProgName(util::GetProgName(pid));
  // NOTE(review): this uses the monitor-wide pid_ while the two lines above
  // use the regs' pid — presumably the same process, but confirm; reading a
  // different pid's maps here would mislabel the result.
  result_.SetProcMaps(ReadProcMaps(pid_));
  if (!ShouldCollectStackTrace()) {
    VLOG(1) << "Stack traces have been disabled";
    return;
  }
  // Without a namespace, unwind against an empty mount tree.
  auto* ns = policy_->GetNamespace();
  const Mounts empty_mounts;
  absl::StatusOr<std::vector<std::string>> stack_trace =
      GetStackTrace(result_.GetRegs(), ns ? ns->mounts() : empty_mounts);

  if (!stack_trace.ok()) {
    LOG(ERROR) << "Could not obtain stack trace: " << stack_trace.status();
    return;
  }

  result_.set_stack_trace(*stack_trace);

  // Log a deduplicated ("compact") form of the trace for readability.
  LOG(INFO) << "Stack trace: [";
  for (const auto& frame : CompactStackTrace(result_.stack_trace())) {
    LOG(INFO) << "  " << frame;
  }
  LOG(INFO) << "]";
}
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2022-05-13 19:34:50 +08:00
|
|
|
bool Monitor::KillSandboxee() {
|
2019-05-15 22:46:26 +08:00
|
|
|
VLOG(1) << "Sending SIGKILL to the PID: " << pid_;
|
|
|
|
if (kill(pid_, SIGKILL) != 0) {
|
2022-05-09 21:57:52 +08:00
|
|
|
PLOG(ERROR) << "Could not send SIGKILL to PID " << pid_;
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_KILL);
|
2022-05-13 19:34:50 +08:00
|
|
|
return false;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2022-05-13 19:34:50 +08:00
|
|
|
return true;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
2022-05-13 19:34:50 +08:00
|
|
|
bool Monitor::InterruptSandboxee() {
|
2022-05-09 21:57:52 +08:00
|
|
|
if (ptrace(PTRACE_INTERRUPT, pid_, 0, 0) == -1) {
|
|
|
|
PLOG(ERROR) << "Could not send interrupt to pid=" << pid_;
|
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INTERRUPT);
|
2022-05-13 19:34:50 +08:00
|
|
|
return false;
|
2022-05-09 21:57:52 +08:00
|
|
|
}
|
2022-05-13 19:34:50 +08:00
|
|
|
return true;
|
2022-05-09 21:57:52 +08:00
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// Not defined in glibc.
#define __WPTRACEEVENT(x) ((x & 0xff0000) >> 16)

// Core supervision loop: processes timeout/kill/dump-stack requests, drains
// wait events via PidWaiter, and classifies sandboxee state changes until a
// final result is set. Afterwards it makes a best effort to ensure the main
// PID is killed and reaped.
void Monitor::MainLoop(sigset_t* sset) {
  bool sandboxee_exited = false;
  PidWaiter pid_waiter(pid_);
  int status;
  // All possible still running children of main process, will be killed due to
  // PTRACE_O_EXITKILL ptrace() flag.
  while (result_.final_status() == Result::UNSET) {
    // Enforce the wall-time deadline, if armed (0 means no deadline).
    int64_t deadline = deadline_millis_.load(std::memory_order_relaxed);
    if (deadline != 0 && absl::Now() >= absl::FromUnixMillis(deadline)) {
      VLOG(1) << "Sandbox process hit timeout due to the walltime timer";
      timed_out_ = true;
      if (!KillSandboxee()) {
        break;
      }
    }

    // A cleared flag means another thread requested a stack dump; the
    // test_and_set consumes the request and re-arms the flag.
    if (!dump_stack_request_flag_.test_and_set(std::memory_order_relaxed)) {
      should_dump_stack_ = true;
      if (!InterruptSandboxee()) {
        break;
      }
    }

    // Same protocol for an externally requested kill.
    if (!external_kill_request_flag_.test_and_set(std::memory_order_relaxed)) {
      external_kill_ = true;
      if (!KillSandboxee()) {
        break;
      }
    }

    // A network proxy violation also terminates the sandboxee (once).
    if (network_proxy_server_ &&
        network_proxy_server_->violation_occurred_.load(
            std::memory_order_acquire) &&
        !network_violation_) {
      network_violation_ = true;
      if (!KillSandboxee()) {
        break;
      }
    }

    pid_t ret = pid_waiter.Wait(&status);
    if (ret == 0) {
      // No pending events: sleep until SIGCHLD or the wake-up period elapses.
      constexpr timespec ts = {kWakeUpPeriodSec, kWakeUpPeriodNSec};
      int signo = sigtimedwait(sset, nullptr, &ts);
      LOG_IF(ERROR, signo != -1 && signo != SIGCHLD)
          << "Unknown signal received: " << signo;
      continue;
    }

    if (ret == -1) {
      if (errno == ECHILD) {
        LOG(ERROR) << "PANIC(). The main process has not exited yet, "
                   << "yet we haven't seen its exit event";
        SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_CHILD);
      } else {
        PLOG(ERROR) << "waitpid() failed";
      }
      continue;
    }

    VLOG(3) << "waitpid() returned with PID: " << ret << ", status: " << status;

    if (WIFEXITED(status)) {
      VLOG(1) << "PID: " << ret
              << " finished with code: " << WEXITSTATUS(status);
      // That's the main process, set the exit code, and exit. It will kill
      // all remaining processes (if there are any) because of the
      // PTRACE_O_EXITKILL ptrace() flag.
      if (ret == pid_) {
        if (IsActivelyMonitoring()) {
          SetExitStatusCode(Result::OK, WEXITSTATUS(status));
        } else {
          // Main process exited before execve() — setup never completed.
          SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_MONITOR);
        }
        sandboxee_exited = true;
      }
    } else if (WIFSIGNALED(status)) {
      // This usually does not happen, but might.
      // Quote from the manual:
      // A SIGKILL signal may still cause a PTRACE_EVENT_EXIT stop before
      // actual signal death. This may be changed in the future;
      VLOG(1) << "PID: " << ret << " terminated with signal: "
              << util::GetSignalName(WTERMSIG(status));
      if (ret == pid_) {
        // Attribute the death to whichever internal cause triggered the kill,
        // checked in priority order; otherwise it was an external signal.
        if (network_violation_) {
          SetExitStatusCode(Result::VIOLATION, Result::VIOLATION_NETWORK);
          result_.SetNetworkViolation(network_proxy_server_->violation_msg_);
        } else if (external_kill_) {
          SetExitStatusCode(Result::EXTERNAL_KILL, 0);
        } else if (timed_out_) {
          SetExitStatusCode(Result::TIMEOUT, 0);
        } else {
          SetExitStatusCode(Result::SIGNALED, WTERMSIG(status));
        }
        sandboxee_exited = true;
      }
    } else if (WIFSTOPPED(status)) {
      VLOG(2) << "PID: " << ret
              << " received signal: " << util::GetSignalName(WSTOPSIG(status))
              << " with event: "
              << util::GetPtraceEventName(__WPTRACEEVENT(status));
      StateProcessStopped(ret, status);
    } else if (WIFCONTINUED(status)) {
      VLOG(2) << "PID: " << ret << " is being continued";
    }
  }
  // Try to make sure main pid is killed and reaped
  if (!sandboxee_exited) {
    kill(pid_, SIGKILL);
    constexpr auto kGracefullExitTimeout = absl::Milliseconds(200);
    auto deadline = absl::Now() + kGracefullExitTimeout;
    for (;;) {
      auto left = deadline - absl::Now();
      if (absl::Now() >= deadline) {
        LOG(INFO) << "Waiting for sandboxee exit timed out";
        break;
      }
      pid_t ret = pid_waiter.Wait(&status);
      if (ret == -1) {
        PLOG(ERROR) << "waitpid() failed";
        break;
      }
      // Done once the main PID's terminal event was reaped.
      if (ret == pid_ && (WIFSIGNALED(status) || WIFEXITED(status))) {
        break;
      }
      if (ret == 0) {
        // Nothing pending: sleep for the remaining grace period (or SIGCHLD).
        auto ts = absl::ToTimespec(left);
        sigtimedwait(sset, nullptr, &ts);
      } else if (WIFSTOPPED(status) &&
                 __WPTRACEEVENT(status) == PTRACE_EVENT_EXIT) {
        // Let a process stopped at its exit event actually die.
        VLOG(2) << "PID: " << ret << " PTRACE_EVENT_EXIT ";
        ContinueProcess(ret, 0);
      } else {
        kill(pid_, SIGKILL);
      }
    }
  }
}
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
bool Monitor::InitSetupSignals(sigset_t* sset) {
|
|
|
|
if (sigemptyset(sset) == -1) {
|
|
|
|
PLOG(ERROR) << "sigemptyset()";
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// sigtimedwait will react (wake-up) to arrival of this signal.
|
2019-05-15 22:46:26 +08:00
|
|
|
if (sigaddset(sset, SIGCHLD) == -1) {
|
|
|
|
PLOG(ERROR) << "sigaddset(SIGCHLD)";
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
|
|
|
|
if (pthread_sigmask(SIG_BLOCK, sset, nullptr) == -1) {
|
|
|
|
PLOG(ERROR) << "pthread_sigmask(SIG_BLOCK, SIGCHLD)";
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Monitor::InitSendPolicy() {
|
|
|
|
if (!policy_->SendPolicy(comms_)) {
|
|
|
|
LOG(ERROR) << "Couldn't send policy";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Monitor::InitSendCwd() {
|
|
|
|
if (!comms_->SendString(executor_->cwd_)) {
|
|
|
|
PLOG(ERROR) << "Couldn't send cwd";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-03-02 22:42:10 +08:00
|
|
|
// Applies one rlimit to the sandboxee via prlimit64(). If the requested soft
// limit exceeds the process's current hard limit, the update is skipped (it
// would fail anyway) and the call still reports success.
bool Monitor::InitApplyLimit(pid_t pid, int resource,
                             const rlimit64& rlim) const {
// glibc types prlimit64's resource argument as the __rlimit_resource enum,
// while Android's libc uses plain int.
#if defined(__ANDROID__)
  using RlimitResource = int;
#else
  using RlimitResource = __rlimit_resource;
#endif

  rlimit64 curr_limit;
  if (prlimit64(pid, static_cast<RlimitResource>(resource), nullptr,
                &curr_limit) == -1) {
    // Reading the current limit failed; log and fall through to try the
    // update below anyway.
    PLOG(ERROR) << "prlimit64(" << pid << ", " << util::GetRlimitName(resource)
                << ")";
  } else if (rlim.rlim_cur > curr_limit.rlim_max) {
    // In such case, don't update the limits, as it will fail. Just stick to the
    // current ones (which are already lower than intended).
    LOG(ERROR) << util::GetRlimitName(resource)
               << ": new.current > current.max (" << rlim.rlim_cur << " > "
               << curr_limit.rlim_max << "), skipping";
    return true;
  }

  if (prlimit64(pid, static_cast<RlimitResource>(resource), &rlim, nullptr) ==
      -1) {
    PLOG(ERROR) << "prlimit64(" << pid << ", " << util::GetRlimitName(resource)
                << ", " << rlim.rlim_cur << ")";
    return false;
  }

  return true;
}
|
|
|
|
|
|
|
|
bool Monitor::InitApplyLimits() {
|
|
|
|
Limits* limits = executor_->limits();
|
|
|
|
return InitApplyLimit(pid_, RLIMIT_AS, limits->rlimit_as()) &&
|
|
|
|
InitApplyLimit(pid_, RLIMIT_CPU, limits->rlimit_cpu()) &&
|
|
|
|
InitApplyLimit(pid_, RLIMIT_FSIZE, limits->rlimit_fsize()) &&
|
|
|
|
InitApplyLimit(pid_, RLIMIT_NOFILE, limits->rlimit_nofile()) &&
|
|
|
|
InitApplyLimit(pid_, RLIMIT_CORE, limits->rlimit_core());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Transfers the prepared IPC file descriptors to the sandboxee over Comms.
bool Monitor::InitSendIPC() { return ipc_->SendFdsOverComms(); }
|
|
|
|
|
|
|
|
bool Monitor::WaitForSandboxReady() {
|
|
|
|
uint32_t tmp;
|
|
|
|
if (!comms_->RecvUint32(&tmp)) {
|
|
|
|
LOG(ERROR) << "Couldn't receive 'Client::kClient2SandboxReady' message";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (tmp != Client::kClient2SandboxReady) {
|
|
|
|
LOG(ERROR) << "Received " << tmp << " != Client::kClient2SandboxReady ("
|
|
|
|
<< Client::kClient2SandboxReady << ")";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Monitor::InitPtraceAttach() {
|
2022-01-20 20:39:48 +08:00
|
|
|
sanitizer::WaitForSanitizer();
|
2019-03-19 00:21:48 +08:00
|
|
|
|
|
|
|
// Get a list of tasks.
|
2021-09-10 18:34:21 +08:00
|
|
|
absl::flat_hash_set<int> tasks;
|
|
|
|
if (auto task_list = sanitizer::GetListOfTasks(pid_); task_list.ok()) {
|
|
|
|
tasks = *std::move(task_list);
|
|
|
|
} else {
|
|
|
|
LOG(ERROR) << "Could not get list of tasks: "
|
|
|
|
<< task_list.status().message();
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-12-19 00:23:31 +08:00
|
|
|
if (tasks.find(pid_) == tasks.end()) {
|
|
|
|
LOG(ERROR) << "The pid " << pid_ << " was not found in its own tasklist.";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// With TSYNC, we can allow threads: seccomp applies to all threads.
|
|
|
|
if (tasks.size() > 1) {
|
|
|
|
LOG(WARNING) << "PID " << pid_ << " has " << tasks.size() << " threads,"
|
|
|
|
<< " at the time of call to SandboxMeHere. If you are seeing"
|
|
|
|
<< " more sandbox violations than expected, this might be"
|
|
|
|
<< " the reason why"
|
|
|
|
<< ".";
|
|
|
|
}
|
|
|
|
|
2021-09-10 18:34:21 +08:00
|
|
|
absl::flat_hash_set<int> tasks_attached;
|
2019-12-19 00:23:31 +08:00
|
|
|
int retries = 0;
|
|
|
|
absl::Time deadline = absl::Now() + absl::Seconds(2);
|
|
|
|
|
|
|
|
// In some situations we allow ptrace to try again when it fails.
|
|
|
|
while (!tasks.empty()) {
|
2021-09-10 18:34:21 +08:00
|
|
|
absl::flat_hash_set<int> tasks_left;
|
2019-12-19 00:23:31 +08:00
|
|
|
for (int task : tasks) {
|
|
|
|
constexpr intptr_t options =
|
|
|
|
PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK |
|
|
|
|
PTRACE_O_TRACEVFORKDONE | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC |
|
|
|
|
PTRACE_O_TRACEEXIT | PTRACE_O_TRACESECCOMP | PTRACE_O_EXITKILL;
|
|
|
|
int ret = ptrace(PTRACE_SEIZE, task, 0, options);
|
|
|
|
if (ret != 0) {
|
|
|
|
if (errno == EPERM) {
|
|
|
|
// Sometimes when a task is exiting we can get an EPERM from ptrace.
|
|
|
|
// Let's try again up until the timeout in this situation.
|
|
|
|
PLOG(WARNING) << "ptrace(PTRACE_SEIZE, " << task << ", "
|
|
|
|
<< absl::StrCat("0x", absl::Hex(options))
|
|
|
|
<< "), trying again...";
|
|
|
|
tasks_left.insert(task);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (errno == ESRCH) {
|
|
|
|
// A task may have exited since we captured the task list, we will
|
|
|
|
// allow things to continue after we log a warning.
|
|
|
|
PLOG(WARNING)
|
|
|
|
<< "ptrace(PTRACE_SEIZE, " << task << ", "
|
|
|
|
<< absl::StrCat("0x", absl::Hex(options))
|
|
|
|
<< ") skipping exited task. Continuing with other tasks.";
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// Any other errno will be considered a failure.
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_SEIZE, " << task << ", "
|
|
|
|
<< absl::StrCat("0x", absl::Hex(options)) << ") failed.";
|
|
|
|
return false;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-12-19 00:23:31 +08:00
|
|
|
tasks_attached.insert(task);
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-12-19 00:23:31 +08:00
|
|
|
if (!tasks_left.empty()) {
|
|
|
|
if (absl::Now() < deadline) {
|
|
|
|
LOG(ERROR) << "Attaching to sandboxee timed out: could not attach to "
|
|
|
|
<< tasks_left.size() << " tasks";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// Exponential Backoff.
|
|
|
|
constexpr absl::Duration kInitialRetry = absl::Milliseconds(1);
|
|
|
|
constexpr absl::Duration kMaxRetry = absl::Milliseconds(20);
|
|
|
|
const absl::Duration retry_interval =
|
|
|
|
kInitialRetry * (1 << std::min(10, retries++));
|
|
|
|
absl::SleepFor(
|
|
|
|
std::min({retry_interval, kMaxRetry, deadline - absl::Now()}));
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-12-19 00:23:31 +08:00
|
|
|
tasks = std::move(tasks_left);
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get a list of tasks after attaching.
|
2021-09-10 18:34:21 +08:00
|
|
|
if (auto tasks_list = sanitizer::GetListOfTasks(pid_); tasks_list.ok()) {
|
|
|
|
tasks = *std::move(tasks_list);
|
|
|
|
} else {
|
|
|
|
LOG(ERROR) << "Could not get list of tasks: "
|
|
|
|
<< tasks_list.status().message();
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-12-19 00:23:31 +08:00
|
|
|
// Check that we attached to all the threads
|
|
|
|
if (tasks_attached != tasks) {
|
2019-03-19 00:21:48 +08:00
|
|
|
LOG(ERROR) << "The pid " << pid_
|
|
|
|
<< " spawned new threads while we were trying to attach to it.";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// No glibc wrapper for gettid - see 'man gettid'.
|
|
|
|
VLOG(1) << "Monitor (PID: " << getpid()
|
|
|
|
<< ", TID: " << util::Syscall(__NR_gettid)
|
|
|
|
<< ") attached to PID: " << pid_;
|
|
|
|
|
|
|
|
// Technically, the sandboxee can be in a ptrace-stopped state right now,
|
|
|
|
// because some signal could have arrived in the meantime. Yet, this
|
|
|
|
// Comms::SendUint32 call shouldn't lock our process, because the underlying
|
|
|
|
// socketpair() channel is buffered, hence it will accept the uint32_t message
|
|
|
|
// no matter what is the current state of the sandboxee, and it will allow for
|
|
|
|
// our process to continue and unlock the sandboxee with the proper ptrace
|
|
|
|
// event handling.
|
|
|
|
if (!comms_->SendUint32(Client::kSandbox2ClientDone)) {
|
|
|
|
LOG(ERROR) << "Couldn't send Client::kSandbox2ClientDone message";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Decides how to handle a traced syscall reported via SECCOMP_RET_TRACE:
// permit it (pre-sandboxing execveat, client-approved, logged permit-all, or
// permit-all flag) or escalate it as a policy violation.
void Monitor::ActionProcessSyscall(Regs* regs, const Syscall& syscall) {
  // If the sandboxing is not enabled yet, allow the first __NR_execveat.
  if (syscall.nr() == __NR_execveat && !IsActivelyMonitoring()) {
    VLOG(1) << "[PERMITTED/BEFORE_EXECVEAT]: "
            << "SYSCALL ::: PID: " << regs->pid() << ", PROG: '"
            << util::GetProgName(regs->pid())
            << "' : " << syscall.GetDescription();
    ContinueProcess(regs->pid(), 0);
    return;
  }

  // Notify can decide whether we want to allow this syscall. It could be useful
  // for sandbox setups in which some syscalls might still need some logging,
  // but nonetheless be allowed ('permissible syscalls' in sandbox v1).
  auto trace_response = notify_->EventSyscallTrace(syscall);
  if (trace_response == Notify::TraceAction::kAllow) {
    ContinueProcess(regs->pid(), 0);
    return;
  }
  if (trace_response == Notify::TraceAction::kInspectAfterReturn) {
    // Note that a process might die without an exit-stop before the syscall is
    // completed (eg. a thread calls execve() and the thread group leader dies),
    // so the entry is removed when the process exits.
    syscalls_in_progress_[regs->pid()] = syscall;
    CompleteSyscall(regs->pid(), 0);
    return;
  }

  // TODO(wiktorg): Further clean that up, probably while doing monitor cleanup
  // log_file_ not null iff FLAGS_sandbox2_danger_danger_permit_all_and_log is
  // set.
  if (log_file_) {
    // Log the syscall and let it through (permit-all-and-log mode).
    std::string syscall_description = syscall.GetDescription();
    PCHECK(absl::FPrintF(log_file_, "PID: %d %s\n", regs->pid(),
                         syscall_description) >= 0);
    ContinueProcess(regs->pid(), 0);
    return;
  }

  if (absl::GetFlag(FLAGS_sandbox2_danger_danger_permit_all)) {
    ContinueProcess(regs->pid(), 0);
    return;
  }

  // No exemption applied: treat the syscall as a policy violation.
  ActionProcessSyscallViolation(regs, syscall, kSyscallViolation);
}
|
|
|
|
|
|
|
|
// Records a syscall policy violation: logs it, informs the Notify client,
// stores the violation details in the final result, and neutralizes the
// offending syscall in the sandboxee.
void Monitor::ActionProcessSyscallViolation(Regs* regs, const Syscall& syscall,
                                            ViolationType violation_type) {
  LogSyscallViolation(syscall);
  notify_->EventSyscallViolation(syscall, violation_type);
  SetExitStatusCode(Result::VIOLATION, syscall.nr());
  result_.SetSyscall(absl::make_unique<Syscall>(syscall));
  SetAdditionalResultInfo(absl::make_unique<Regs>(*regs));
  // Rewrite the syscall argument to something invalid (-1).
  // The process will be killed anyway so this is just a precaution.
  if (auto skip_status = regs->SkipSyscallReturnValue(-ENOSYS);
      !skip_status.ok()) {
    LOG(ERROR) << skip_status;
  }
}
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
// Logs a syscall violation with the offending process's name and, at higher
// verbosity, its cmdline and /proc status details, followed by a
// human-readable explanation of why the syscall was blocked.
void Monitor::LogSyscallViolation(const Syscall& syscall) const {
  // Do not unwind libunwind.
  if (executor_->libunwind_sbox_for_pid_ != 0) {
    LOG(ERROR) << "Sandbox violation during execution of libunwind: "
               << syscall.GetDescription();
    return;
  }

  // So, this is an invalid syscall. Will be killed by seccomp-bpf policies as
  // well, but we should be on a safe side here as well.
  LOG(ERROR) << "SANDBOX VIOLATION : PID: " << syscall.pid() << ", PROG: '"
             << util::GetProgName(syscall.pid())
             << "' : " << syscall.GetDescription();
  // Extra diagnostics are gathered only when verbose logging is enabled,
  // since each line reads from /proc.
  if (VLOG_IS_ON(1)) {
    VLOG(1) << "Cmdline: " << util::GetCmdLine(syscall.pid());
    VLOG(1) << "Task Name: " << util::GetProcStatusLine(syscall.pid(), "Name");
    VLOG(1) << "Tgid: " << util::GetProcStatusLine(syscall.pid(), "Tgid");
  }

  LogSyscallViolationExplanation(syscall);
}
|
|
|
|
|
|
|
|
// Handles a PTRACE_EVENT_SECCOMP stop. event_msg carries the SECCOMP_RET_DATA
// set by the policy, which this code expects to be the syscall's architecture
// identifier. Dispatches the syscall for normal processing, or reports a
// violation if the syscall architecture differs from the host's.
void Monitor::EventPtraceSeccomp(pid_t pid, int event_msg) {
  if (event_msg < sapi::cpu::Architecture::kUnknown ||
      event_msg > sapi::cpu::Architecture::kMax) {
    // We've observed that, if the process has exited, the event_msg may contain
    // the exit status even though we haven't received the exit event yet.
    // To work around this, if the event msg is not in the range of the known
    // architectures, we assume that it's an exit status. We deal with it by
    // ignoring this event, and we'll get the exit event in the next iteration.
    LOG(WARNING) << "received event_msg for unknown architecture: " << event_msg
                 << "; the program may have exited";
    return;
  }

  // If the seccomp-policy is using RET_TRACE, we request that it returns the
  // syscall architecture identifier in the SECCOMP_RET_DATA.
  const auto syscall_arch = static_cast<sapi::cpu::Architecture>(event_msg);
  Regs regs(pid);
  auto status = regs.Fetch();
  if (!status.ok()) {
    // Ignore if process is killed in the meanwhile
    if (absl::IsNotFound(status)) {
      LOG(WARNING) << "failed to fetch regs: " << status;
      return;
    }
    // Any other fetch failure is an internal error that ends the sandbox run.
    LOG(ERROR) << "failed to fetch regs: " << status;
    SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_FETCH);
    return;
  }

  Syscall syscall = regs.ToSyscall(syscall_arch);
  // If the architecture of the syscall used is different that the current host
  // architecture, report a violation.
  if (syscall_arch != Syscall::GetHostArch()) {
    ActionProcessSyscallViolation(&regs, syscall, kArchitectureSwitchViolation);
    return;
  }

  ActionProcessSyscall(&regs, syscall);
}
|
|
|
|
|
2022-01-13 22:48:44 +08:00
|
|
|
// Handles a syscall-exit-stop: reports the syscall's return value to the
// Notify client for a syscall previously marked kInspectAfterReturn, then
// resumes the process. It is an internal error if no syscall was recorded as
// in progress for this PID.
void Monitor::EventSyscallExit(pid_t pid) {
  // Check that the monitor wants to inspect the current syscall's return value.
  auto index = syscalls_in_progress_.find(pid);
  if (index == syscalls_in_progress_.end()) {
    LOG(ERROR) << "Expected a syscall in progress in PID " << pid;
    SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INSPECT);
    return;
  }
  Regs regs(pid);
  auto status = regs.Fetch();
  if (!status.ok()) {
    // Ignore if process is killed in the meanwhile
    if (absl::IsNotFound(status)) {
      LOG(WARNING) << "failed to fetch regs: " << status;
      return;
    }
    LOG(ERROR) << "failed to fetch regs: " << status;
    SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_FETCH);
    return;
  }
  // Notify first, then forget the in-progress entry, then resume the process.
  int64_t return_value = regs.GetReturnValue(sapi::host_cpu::Architecture());
  notify_->EventSyscallReturn(index->second, return_value);
  syscalls_in_progress_.erase(index);
  ContinueProcess(pid, 0);
}
|
|
|
|
|
|
|
|
// Handles PTRACE_EVENT_FORK/VFORK/CLONE in the parent. event_msg is the new
// child's PID (per ptrace semantics for these events), which doubles as the
// parent's syscall return value when the client asked to inspect it.
void Monitor::EventPtraceNewProcess(pid_t pid, int event_msg) {
  // ptrace doesn't issue syscall-exit-stops for successful fork/vfork/clone
  // system calls. Check if the monitor wanted to inspect the syscall's return
  // value, and call EventSyscallReturn for the parent process if so.
  auto index = syscalls_in_progress_.find(pid);
  if (index != syscalls_in_progress_.end()) {
    auto syscall_nr = index->second.nr();
    bool creating_new_process = syscall_nr == __NR_clone;
// __NR_fork/__NR_vfork are not defined on all architectures, so they are
// only checked when available.
#ifdef __NR_fork
    creating_new_process = creating_new_process || syscall_nr == __NR_fork;
#endif
#ifdef __NR_vfork
    creating_new_process = creating_new_process || syscall_nr == __NR_vfork;
#endif
    if (!creating_new_process) {
      LOG(ERROR) << "Expected a fork/vfork/clone syscall in progress in PID "
                 << pid << "; actual: " << index->second.GetDescription();
      SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INSPECT);
      return;
    }
    notify_->EventSyscallReturn(index->second, event_msg);
    syscalls_in_progress_.erase(index);
  }
  ContinueProcess(pid, 0);
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// Handles PTRACE_EVENT_EXEC. The first exec marks the point where sandboxing
// becomes active; later execs may complete an inspected execve/execveat
// syscall (which reports 0 on success, since exec does not "return").
void Monitor::EventPtraceExec(pid_t pid, int event_msg) {
  if (!IsActivelyMonitoring()) {
    VLOG(1) << "PTRACE_EVENT_EXEC seen from PID: " << event_msg
            << ". SANDBOX ENABLED!";
    SetActivelyMonitoring();
  } else {
    // ptrace doesn't issue syscall-exit-stops for successful execve/execveat
    // system calls. Check if the monitor wanted to inspect the syscall's return
    // value, and call EventSyscallReturn if so.
    auto index = syscalls_in_progress_.find(pid);
    if (index != syscalls_in_progress_.end()) {
      auto syscall_nr = index->second.nr();
      if (syscall_nr != __NR_execve && syscall_nr != __NR_execveat) {
        LOG(ERROR) << "Expected an execve/execveat syscall in progress in PID "
                   << pid << "; actual: " << index->second.GetDescription();
        SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INSPECT);
        return;
      }
      notify_->EventSyscallReturn(index->second, 0);
      syscalls_in_progress_.erase(index);
    }
  }
  ContinueProcess(pid, 0);
}
|
|
|
|
|
|
|
|
// Handles PTRACE_EVENT_EXIT. event_msg is the wait-style exit status of the
// dying task. Fast-paths ordinary exits; detects SIGSYS deaths as seccomp
// violations; for the main PID, fills in the final Result (network violation,
// external kill, timeout, normal exit, or signal) before resuming.
void Monitor::EventPtraceExit(pid_t pid, int event_msg) {
  // Forget about any syscalls in progress for this PID.
  syscalls_in_progress_.erase(pid);

  // A regular exit, let it continue (fast-path).
  if (ABSL_PREDICT_TRUE(
          WIFEXITED(event_msg) &&
          (!policy_->collect_stacktrace_on_exit_ || pid != pid_))) {
    ContinueProcess(pid, 0);
    return;
  }

  // Death by SIGSYS means the seccomp policy killed the process.
  const bool is_seccomp =
      WIFSIGNALED(event_msg) && WTERMSIG(event_msg) == SIGSYS;

  // Fetch the registers as we'll need them to fill the result in any case
  auto regs = absl::make_unique<Regs>(pid);
  if (is_seccomp || pid == pid_) {
    auto status = regs->Fetch();
    if (!status.ok()) {
      LOG(ERROR) << "failed to fetch regs: " << status;
      SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_FETCH);
      return;
    }
  }

  // Process signaled due to seccomp violation.
  if (is_seccomp) {
    VLOG(1) << "PID: " << pid << " violation uncovered via the EXIT_EVENT";
    ActionProcessSyscallViolation(
        regs.get(), regs->ToSyscall(Syscall::GetHostArch()), kSyscallViolation);
    return;
  }

  // This can be reached in four cases:
  // 1) Process was killed from the sandbox.
  // 2) Process was killed because it hit a timeout.
  // 3) Regular signal/other exit cause.
  // 4) Normal exit for which we want to obtain stack trace.
  if (pid == pid_) {
    VLOG(1) << "PID: " << pid << " main special exit";
    if (network_violation_) {
      SetExitStatusCode(Result::VIOLATION, Result::VIOLATION_NETWORK);
      result_.SetNetworkViolation(network_proxy_server_->violation_msg_);
    } else if (external_kill_) {
      SetExitStatusCode(Result::EXTERNAL_KILL, 0);
    } else if (timed_out_) {
      SetExitStatusCode(Result::TIMEOUT, 0);
    } else if (WIFEXITED(event_msg)) {
      SetExitStatusCode(Result::OK, WEXITSTATUS(event_msg));
    } else {
      SetExitStatusCode(Result::SIGNALED, WTERMSIG(event_msg));
    }
    SetAdditionalResultInfo(std::move(regs));
  }
  VLOG(1) << "Continuing";
  ContinueProcess(pid, 0);
}
|
|
|
|
|
|
|
|
void Monitor::EventPtraceStop(pid_t pid, int stopsig) {
|
|
|
|
// It's not a real stop signal. For example PTRACE_O_TRACECLONE and similar
|
|
|
|
// flags to ptrace(PTRACE_SEIZE) might generate this event with SIGTRAP.
|
|
|
|
if (stopsig != SIGSTOP && stopsig != SIGTSTP && stopsig != SIGTTIN &&
|
|
|
|
stopsig != SIGTTOU) {
|
2019-05-15 22:46:26 +08:00
|
|
|
ContinueProcess(pid, 0);
|
2019-03-19 00:21:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
// It's our PID stop signal. Stop it.
|
|
|
|
VLOG(2) << "PID: " << pid << " stopped due to "
|
|
|
|
<< util::GetSignalName(stopsig);
|
2019-05-15 22:46:26 +08:00
|
|
|
StopProcess(pid, 0);
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Central dispatcher for a waitpid()-reported stop: distinguishes plain signal
// deliveries, syscall-exit-stops (via PTRACE_O_TRACESYSGOOD's SIGTRAP|0x80
// marker), and the various PTRACE_EVENT_* stops, routing each to its handler.
// Also dumps the sandboxee's stack once when a dump was requested.
void Monitor::StateProcessStopped(pid_t pid, int status) {
  int stopsig = WSTOPSIG(status);
  // We use PTRACE_O_TRACESYSGOOD, so we can tell it's a syscall stop without
  // calling PTRACE_GETSIGINFO by checking the value of the reported signal.
  bool is_syscall_exit = stopsig == (SIGTRAP | 0x80);
  if (__WPTRACEEVENT(status) == 0 && !is_syscall_exit) {
    // Must be a regular signal delivery.
    VLOG(2) << "PID: " << pid
            << " received signal: " << util::GetSignalName(stopsig);
    notify_->EventSignal(pid, stopsig);
    // Re-inject the signal so the sandboxee actually receives it.
    ContinueProcess(pid, stopsig);
    return;
  }

  unsigned long event_msg;  // NOLINT
  if (ptrace(PTRACE_GETEVENTMSG, pid, 0, &event_msg) == -1) {
    if (errno == ESRCH) {
      // This happens from time to time, the kernel does not guarantee us that
      // we get the event in time.
      PLOG(INFO) << "ptrace(PTRACE_GETEVENTMSG, " << pid << ")";
      return;
    }
    PLOG(ERROR) << "ptrace(PTRACE_GETEVENTMSG, " << pid << ")";
    SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_GETEVENT);
    return;
  }

  // One-shot stack dump of the main sandboxee, requested via
  // should_dump_stack_; only possible when a namespace (with mounts) is
  // configured and we are not the libunwind sandbox ourselves.
  if (ABSL_PREDICT_FALSE(pid == pid_ && should_dump_stack_ &&
                         executor_->libunwind_sbox_for_pid_ == 0 &&
                         policy_->GetNamespace())) {
    auto stack_trace = [this,
                        pid]() -> absl::StatusOr<std::vector<std::string>> {
      Regs regs(pid);
      SAPI_RETURN_IF_ERROR(regs.Fetch());
      return GetStackTrace(&regs, policy_->GetNamespace()->mounts());
    }();

    if (!stack_trace.ok()) {
      LOG(WARNING) << "FAILED TO GET SANDBOX STACK : " << stack_trace.status();
    } else if (VLOG_IS_ON(0)) {
      VLOG(0) << "SANDBOX STACK: PID: " << pid << ", [";
      for (const auto& frame : *stack_trace) {
        VLOG(0) << " " << frame;
      }
      VLOG(0) << "]";
    }
    should_dump_stack_ = false;
  }

// Fallback definition for older kernel headers that lack PTRACE_EVENT_STOP.
#ifndef PTRACE_EVENT_STOP
#define PTRACE_EVENT_STOP 128
#endif

  if (is_syscall_exit) {
    VLOG(2) << "PID: " << pid << " syscall-exit-stop: " << event_msg;
    EventSyscallExit(pid);
    return;
  }

  switch (__WPTRACEEVENT(status)) {
    case PTRACE_EVENT_FORK:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_FORK, PID: " << event_msg;
      EventPtraceNewProcess(pid, event_msg);
      break;
    case PTRACE_EVENT_VFORK:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_VFORK, PID: " << event_msg;
      EventPtraceNewProcess(pid, event_msg);
      break;
    case PTRACE_EVENT_CLONE:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_CLONE, PID: " << event_msg;
      EventPtraceNewProcess(pid, event_msg);
      break;
    case PTRACE_EVENT_VFORK_DONE:
      ContinueProcess(pid, 0);
      break;
    case PTRACE_EVENT_EXEC:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_EXEC, PID: " << event_msg;
      EventPtraceExec(pid, event_msg);
      break;
    case PTRACE_EVENT_EXIT:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_EXIT: " << event_msg;
      EventPtraceExit(pid, event_msg);
      break;
    case PTRACE_EVENT_STOP:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_STOP: " << event_msg;
      EventPtraceStop(pid, stopsig);
      break;
    case PTRACE_EVENT_SECCOMP:
      VLOG(2) << "PID: " << pid << " PTRACE_EVENT_SECCOMP: " << event_msg;
      EventPtraceSeccomp(pid, event_msg);
      break;
    default:
      LOG(ERROR) << "Unknown ptrace event: " << __WPTRACEEVENT(status)
                 << " with data: " << event_msg;
      break;
  }
}
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
// Logs a human-readable explanation for syscalls that the default policy
// blocks unconditionally (wrong architecture, ptrace, bpf, clone with
// CLONE_UNTRACED). Emits nothing for other violations.
void Monitor::LogSyscallViolationExplanation(const Syscall& syscall) const {
  const uintptr_t syscall_nr = syscall.nr();
  const uintptr_t arg0 = syscall.args()[0];

  // This follows policy in Policy::GetDefaultPolicy - keep it in sync.
  if (syscall.arch() != Syscall::GetHostArch()) {
    LOG(ERROR)
        << "This is a violation because the syscall was issued because the"
        << " sandboxee and executor architectures are different.";
    return;
  }
  if (syscall_nr == __NR_ptrace) {
    LOG(ERROR)
        << "This is a violation because the ptrace syscall would be unsafe in"
        << " sandbox2, so it has been blocked.";
    return;
  }
  if (syscall_nr == __NR_bpf) {
    LOG(ERROR)
        << "This is a violation because the bpf syscall would be risky in"
        << " a sandbox, so it has been blocked.";
    return;
  }
  if (syscall_nr == __NR_clone && ((arg0 & CLONE_UNTRACED) != 0)) {
    LOG(ERROR) << "This is a violation because calling clone with CLONE_UNTRACE"
               << " would be unsafe in sandbox2, so it has been blocked.";
    return;
  }
}
|
|
|
|
|
2020-02-20 23:45:22 +08:00
|
|
|
void Monitor::EnableNetworkProxyServer() {
|
|
|
|
int fd = ipc_->ReceiveFd(NetworkProxyClient::kFDName);
|
|
|
|
|
|
|
|
network_proxy_server_ = absl::make_unique<NetworkProxyServer>(
|
|
|
|
fd, &policy_->allowed_hosts_.value(), pthread_self());
|
|
|
|
|
|
|
|
network_proxy_thread_ = std::thread(&NetworkProxyServer::Run,
|
|
|
|
network_proxy_server_.get());
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
} // namespace sandbox2
|