2023-01-30 21:08:42 +08:00
|
|
|
// Copyright 2023 Google LLC
|
2019-03-19 00:21:48 +08:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
2022-01-28 17:38:27 +08:00
|
|
|
// https://www.apache.org/licenses/LICENSE-2.0
|
2019-03-19 00:21:48 +08:00
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2023-01-30 21:08:42 +08:00
|
|
|
// Implementation file for the sandbox2::PtraceMonitor class.
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2023-01-30 21:08:42 +08:00
|
|
|
#include "sandboxed_api/sandbox2/monitor_ptrace.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
|
|
|
|
#include <sys/ptrace.h>
|
|
|
|
#include <sys/wait.h>
|
|
|
|
#include <syscall.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#include <atomic>
|
|
|
|
#include <cerrno>
|
|
|
|
#include <csignal>
|
2022-08-09 23:27:28 +08:00
|
|
|
#include <deque>
|
2019-03-19 00:21:48 +08:00
|
|
|
#include <fstream>
|
|
|
|
#include <memory>
|
|
|
|
#include <sstream>
|
|
|
|
#include <string>
|
2022-08-09 23:27:28 +08:00
|
|
|
#include <utility>
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2021-05-10 22:03:15 +08:00
|
|
|
#include "absl/cleanup/cleanup.h"
|
2021-09-10 18:34:21 +08:00
|
|
|
#include "absl/container/flat_hash_set.h"
|
2022-10-20 21:48:06 +08:00
|
|
|
#include "absl/flags/declare.h"
|
|
|
|
#include "absl/flags/flag.h"
|
2021-05-10 22:03:15 +08:00
|
|
|
#include "absl/status/status.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "absl/strings/str_cat.h"
|
|
|
|
#include "absl/time/time.h"
|
2021-01-14 01:25:25 +08:00
|
|
|
#include "sandboxed_api/config.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
#include "sandboxed_api/sandbox2/client.h"
|
|
|
|
#include "sandboxed_api/sandbox2/comms.h"
|
|
|
|
#include "sandboxed_api/sandbox2/executor.h"
|
|
|
|
#include "sandboxed_api/sandbox2/policy.h"
|
|
|
|
#include "sandboxed_api/sandbox2/regs.h"
|
|
|
|
#include "sandboxed_api/sandbox2/result.h"
|
|
|
|
#include "sandboxed_api/sandbox2/sanitizer.h"
|
|
|
|
#include "sandboxed_api/sandbox2/syscall.h"
|
|
|
|
#include "sandboxed_api/sandbox2/util.h"
|
2022-10-20 21:48:06 +08:00
|
|
|
#include "sandboxed_api/util/raw_logging.h"
|
2021-06-28 17:02:35 +08:00
|
|
|
#include "sandboxed_api/util/status_macros.h"
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2023-01-18 17:44:18 +08:00
|
|
|
ABSL_FLAG(bool, sandbox2_log_all_stack_traces, false,
|
|
|
|
"If set, sandbox2 monitor will log stack traces of all monitored "
|
|
|
|
"threads/processes that are reported to terminate with a signal.");
|
|
|
|
|
|
|
|
ABSL_FLAG(absl::Duration, sandbox2_stack_traces_collection_timeout,
|
|
|
|
absl::Seconds(1),
|
|
|
|
"How much time should be spent on logging threads' stack traces on "
|
|
|
|
"monitor shut down. Only relevent when collection of all stack "
|
|
|
|
"traces is enabled.");
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
ABSL_DECLARE_FLAG(bool, sandbox2_danger_danger_permit_all);
|
|
|
|
|
|
|
|
namespace sandbox2 {
|
|
|
|
namespace {
|
|
|
|
|
2022-08-09 23:27:28 +08:00
|
|
|
// waitpid() tends to favor the most recently created threads, so a busy new
// thread can starve older ones of attention. PidWaiter avoids that by
// draining every pending status into a FIFO queue and then handing them out
// one at a time on each Wait() call, giving every thread its chance.
class PidWaiter {
 public:
  // Creates a PidWaiter; 'priority_pid' is always polled before the rest.
  explicit PidWaiter(pid_t priority_pid) : priority_pid_(priority_pid) {}

  // Returns the PID of a thread that needs attention, storing the status
  // reported by waitpid() in '*status'. Returns 0 when no thread currently
  // requires attention, or -1 on error (error code available via 'errno').
  int Wait(int* status) {
    if (statuses_.empty() && last_errno_ == 0) {
      RefillStatuses();
    }

    if (statuses_.empty()) {
      if (last_errno_ == 0) {
        return 0;
      }
      errno = last_errno_;
      last_errno_ = 0;
      return -1;
    }

    const std::pair<pid_t, int>& front = statuses_.front();
    const pid_t pid = front.first;
    *status = front.second;
    statuses_.pop_front();
    return pid;
  }

 private:
  // Collects every status that is already pending, checking priority_pid_
  // first and then sweeping the remaining children.
  void RefillStatuses() {
    statuses_.clear();
    last_errno_ = 0;
    int status = 0;
    pid_t target = priority_pid_;
    for (;;) {
      // WNOHANG keeps this non-blocking: we only gather what is already
      // pending and return immediately otherwise.
      const pid_t ret =
          waitpid(target, &status, __WNOTHREAD | __WALL | WUNTRACED | WNOHANG);
      if (ret < 0) {
        last_errno_ = errno;
        break;
      }
      if (ret > 0) {
        statuses_.emplace_back(ret, status);
      } else if (target == -1) {
        // Nothing pending on the general sweep either; we are done.
        break;
      }
      // After the priority PID has been checked once, sweep all children.
      target = -1;
    }
  }

  pid_t priority_pid_;
  std::deque<std::pair<pid_t, int>> statuses_ = {};
  int last_errno_ = 0;
};
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// We could use the ProcMapsIterator, however we want the full file content.
|
|
|
|
std::string ReadProcMaps(pid_t pid) {
|
|
|
|
std::ifstream input(absl::StrCat("/proc/", pid, "/maps"),
|
|
|
|
std::ios_base::in | std::ios_base::binary);
|
|
|
|
std::ostringstream contents;
|
|
|
|
contents << input.rdbuf();
|
|
|
|
return contents.str();
|
|
|
|
}
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
void ContinueProcess(pid_t pid, int signo) {
|
|
|
|
if (ptrace(PTRACE_CONT, pid, 0, signo) == -1) {
|
2019-05-21 22:30:34 +08:00
|
|
|
if (errno == ESRCH) {
|
|
|
|
LOG(WARNING) << "Process " << pid
|
|
|
|
<< " died while trying to PTRACE_CONT it";
|
|
|
|
} else {
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_CONT, pid=" << pid << ", sig=" << signo
|
|
|
|
<< ")";
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void StopProcess(pid_t pid, int signo) {
|
|
|
|
if (ptrace(PTRACE_LISTEN, pid, 0, signo) == -1) {
|
2019-05-21 22:30:34 +08:00
|
|
|
if (errno == ESRCH) {
|
|
|
|
LOG(WARNING) << "Process " << pid
|
|
|
|
<< " died while trying to PTRACE_LISTEN it";
|
|
|
|
} else {
|
2021-07-12 16:56:06 +08:00
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_LISTEN, pid=" << pid << ", sig=" << signo
|
2019-05-21 22:30:34 +08:00
|
|
|
<< ")";
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-13 22:48:44 +08:00
|
|
|
void CompleteSyscall(pid_t pid, int signo) {
|
|
|
|
if (ptrace(PTRACE_SYSCALL, pid, 0, signo) == -1) {
|
|
|
|
if (errno == ESRCH) {
|
|
|
|
LOG(WARNING) << "Process " << pid
|
|
|
|
<< " died while trying to PTRACE_SYSCALL it";
|
|
|
|
} else {
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_SYSCALL, pid=" << pid << ", sig=" << signo
|
|
|
|
<< ")";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
} // namespace
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
PtraceMonitor::PtraceMonitor(Executor* executor, Policy* policy, Notify* notify)
    : MonitorBase(executor, policy, notify),
      wait_for_execve_(executor->enable_sandboxing_pre_execve_) {
  // If a wall-time limit is configured, precompute the absolute deadline in
  // Unix millis so the monitor loop can compare against it cheaply.
  if (executor_->limits()->wall_time_limit() != absl::ZeroDuration()) {
    auto deadline = absl::Now() + executor_->limits()->wall_time_limit();
    deadline_millis_.store(absl::ToUnixMillis(deadline),
                           std::memory_order_relaxed);
  }
  // Arm both request flags. The monitor loop treats a *cleared* flag as a
  // pending request (it uses test_and_set() and acts when that returns
  // false), so they must start out set.
  external_kill_request_flag_.test_and_set(std::memory_order_relaxed);
  dump_stack_request_flag_.test_and_set(std::memory_order_relaxed);
}
|
|
|
|
|
|
|
|
// Returns true once the sandboxee's initial execve() has happened and the
// policy is actually being enforced.
bool PtraceMonitor::IsActivelyMonitoring() {
  // If we're still waiting for execve(), then we allow all syscalls.
  return !wait_for_execve_;
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Marks the sandboxee as past its initial execve(); from this point on,
// IsActivelyMonitoring() returns true and syscalls are policed.
void PtraceMonitor::SetActivelyMonitoring() { wait_for_execve_ = false; }
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
void PtraceMonitor::SetAdditionalResultInfo(std::unique_ptr<Regs> regs) {
|
2019-05-15 22:46:26 +08:00
|
|
|
pid_t pid = regs->pid();
|
|
|
|
result_.SetRegs(std::move(regs));
|
|
|
|
result_.SetProgName(util::GetProgName(pid));
|
2023-01-23 17:41:42 +08:00
|
|
|
result_.SetProcMaps(ReadProcMaps(pid));
|
2023-02-16 21:06:50 +08:00
|
|
|
if (!ShouldCollectStackTrace(result_.final_status())) {
|
2022-06-23 21:41:53 +08:00
|
|
|
VLOG(1) << "Stack traces have been disabled";
|
2020-07-20 15:24:12 +08:00
|
|
|
return;
|
|
|
|
}
|
2021-06-28 17:02:35 +08:00
|
|
|
|
2023-01-18 17:44:18 +08:00
|
|
|
absl::StatusOr<std::vector<std::string>> stack_trace =
|
|
|
|
GetAndLogStackTrace(result_.GetRegs());
|
2021-06-28 17:02:35 +08:00
|
|
|
if (!stack_trace.ok()) {
|
|
|
|
LOG(ERROR) << "Could not obtain stack trace: " << stack_trace.status();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
result_.set_stack_trace(*stack_trace);
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
2019-03-19 00:21:48 +08:00
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
bool PtraceMonitor::KillSandboxee() {
|
|
|
|
VLOG(1) << "Sending SIGKILL to the PID: " << process_.main_pid;
|
|
|
|
if (kill(process_.main_pid, SIGKILL) != 0) {
|
|
|
|
PLOG(ERROR) << "Could not send SIGKILL to PID " << process_.main_pid;
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_KILL);
|
2022-05-13 19:34:50 +08:00
|
|
|
return false;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2022-05-13 19:34:50 +08:00
|
|
|
return true;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
bool PtraceMonitor::InterruptSandboxee() {
|
|
|
|
if (ptrace(PTRACE_INTERRUPT, process_.main_pid, 0, 0) == -1) {
|
|
|
|
PLOG(ERROR) << "Could not send interrupt to pid=" << process_.main_pid;
|
2022-05-09 21:57:52 +08:00
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INTERRUPT);
|
2022-05-13 19:34:50 +08:00
|
|
|
return false;
|
2022-05-09 21:57:52 +08:00
|
|
|
}
|
2022-05-13 19:34:50 +08:00
|
|
|
return true;
|
2022-05-09 21:57:52 +08:00
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// Not defined in glibc.
|
|
|
|
#define __WPTRACEEVENT(x) ((x & 0xff0000) >> 16)
|
2019-05-15 22:46:26 +08:00
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Wakes the monitor thread by sending it SIGCHLD, interrupting a pending
// sigtimedwait() in the monitor loop so external requests (kill, dump-stack)
// are noticed promptly.
void PtraceMonitor::NotifyMonitor() {
  // Reader lock suffices: we only need thread_ to remain valid while we
  // signal it. Join() takes the exclusive lock before resetting thread_.
  absl::ReaderMutexLock lock(&notify_mutex_);
  if (thread_ != nullptr) {
    pthread_kill(thread_->native_handle(), SIGCHLD);
  }
}
|
|
|
|
|
|
|
|
// Blocks until the monitor thread finishes, then verifies that it reached a
// final state. Safe to call more than once: later calls find thread_ already
// reset and return immediately.
void PtraceMonitor::Join() {
  // Exclusive lock: thread_ is about to be destroyed, so NotifyMonitor()
  // (which holds the reader lock) must not be using it concurrently.
  absl::MutexLock lock(&notify_mutex_);
  if (thread_) {
    thread_->join();
    CHECK(IsDone()) << "Monitor did not terminate";
    VLOG(1) << "Final execution status: " << result_.ToString();
    CHECK(result_.final_status() != Result::UNSET);
    thread_.reset();
  }
}
|
|
|
|
|
|
|
|
// Starts the monitor loop on a dedicated thread and blocks until the monitor
// has finished (or failed) its initial sandboxee setup.
void PtraceMonitor::RunInternal() {
  thread_ = std::make_unique<std::thread>(&PtraceMonitor::Run, this);

  // Wait for the Monitor to set-up the sandboxee correctly (or fail while
  // doing that). From here on, it is safe to use the IPC object for
  // non-sandbox-related data exchange.
  setup_notification_.WaitForNotification();
}
|
|
|
|
|
|
|
|
void PtraceMonitor::Run() {
|
|
|
|
absl::Cleanup monitor_done = [this] {
|
|
|
|
getrusage(RUSAGE_THREAD, result_.GetRUsageMonitor());
|
|
|
|
OnDone();
|
|
|
|
};
|
|
|
|
|
|
|
|
absl::Cleanup setup_notify = [this] { setup_notification_.Notify(); };
|
|
|
|
// It'd be costly to initialize the sigset_t for each sigtimedwait()
|
|
|
|
// invocation, so do it once per Monitor.
|
|
|
|
if (!InitSetupSignals()) {
|
|
|
|
SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_SIGNALS);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// This call should be the last in the init sequence, because it can cause the
|
|
|
|
// sandboxee to enter ptrace-stopped state, in which it will not be able to
|
|
|
|
// send any messages over the Comms channel.
|
|
|
|
if (!InitPtraceAttach()) {
|
|
|
|
SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_PTRACE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tell the parent thread (Sandbox2 object) that we're done with the initial
|
|
|
|
// set-up process of the sandboxee.
|
|
|
|
std::move(setup_notify).Invoke();
|
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
bool sandboxee_exited = false;
|
2023-01-23 17:41:42 +08:00
|
|
|
PidWaiter pid_waiter(process_.main_pid);
|
2019-05-15 22:46:26 +08:00
|
|
|
int status;
|
|
|
|
// All possible still running children of main process, will be killed due to
|
|
|
|
// PTRACE_O_EXITKILL ptrace() flag.
|
2023-01-23 17:41:42 +08:00
|
|
|
while (result().final_status() == Result::UNSET) {
|
2019-05-15 22:46:26 +08:00
|
|
|
int64_t deadline = deadline_millis_.load(std::memory_order_relaxed);
|
|
|
|
if (deadline != 0 && absl::Now() >= absl::FromUnixMillis(deadline)) {
|
|
|
|
VLOG(1) << "Sandbox process hit timeout due to the walltime timer";
|
|
|
|
timed_out_ = true;
|
2022-05-13 19:34:50 +08:00
|
|
|
if (!KillSandboxee()) {
|
|
|
|
break;
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!dump_stack_request_flag_.test_and_set(std::memory_order_relaxed)) {
|
|
|
|
should_dump_stack_ = true;
|
2022-05-13 19:34:50 +08:00
|
|
|
if (!InterruptSandboxee()) {
|
|
|
|
break;
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!external_kill_request_flag_.test_and_set(std::memory_order_relaxed)) {
|
|
|
|
external_kill_ = true;
|
2022-05-13 19:34:50 +08:00
|
|
|
if (!KillSandboxee()) {
|
|
|
|
break;
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
|
|
|
|
2020-02-20 23:45:22 +08:00
|
|
|
if (network_proxy_server_ &&
|
|
|
|
network_proxy_server_->violation_occurred_.load(
|
|
|
|
std::memory_order_acquire) &&
|
|
|
|
!network_violation_) {
|
|
|
|
network_violation_ = true;
|
2022-05-13 19:34:50 +08:00
|
|
|
if (!KillSandboxee()) {
|
|
|
|
break;
|
|
|
|
}
|
2022-05-09 21:57:52 +08:00
|
|
|
}
|
|
|
|
|
2022-08-09 23:27:28 +08:00
|
|
|
pid_t ret = pid_waiter.Wait(&status);
|
2019-05-15 22:46:26 +08:00
|
|
|
if (ret == 0) {
|
|
|
|
constexpr timespec ts = {kWakeUpPeriodSec, kWakeUpPeriodNSec};
|
2023-01-23 17:41:42 +08:00
|
|
|
int signo = sigtimedwait(&sset_, nullptr, &ts);
|
2019-05-15 22:46:26 +08:00
|
|
|
LOG_IF(ERROR, signo != -1 && signo != SIGCHLD)
|
|
|
|
<< "Unknown signal received: " << signo;
|
2019-05-09 02:34:05 +08:00
|
|
|
continue;
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
|
2019-05-09 02:34:05 +08:00
|
|
|
if (ret == -1) {
|
2019-05-15 22:46:26 +08:00
|
|
|
if (errno == ECHILD) {
|
|
|
|
LOG(ERROR) << "PANIC(). The main process has not exited yet, "
|
|
|
|
<< "yet we haven't seen its exit event";
|
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_CHILD);
|
|
|
|
} else {
|
|
|
|
PLOG(ERROR) << "waitpid() failed";
|
|
|
|
}
|
2019-05-09 02:34:05 +08:00
|
|
|
continue;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
2019-05-09 02:34:05 +08:00
|
|
|
VLOG(3) << "waitpid() returned with PID: " << ret << ", status: " << status;
|
|
|
|
|
|
|
|
if (WIFEXITED(status)) {
|
|
|
|
VLOG(1) << "PID: " << ret
|
|
|
|
<< " finished with code: " << WEXITSTATUS(status);
|
|
|
|
// That's the main process, set the exit code, and exit. It will kill
|
|
|
|
// all remaining processes (if there are any) because of the
|
|
|
|
// PTRACE_O_EXITKILL ptrace() flag.
|
2023-01-23 17:41:42 +08:00
|
|
|
if (ret == process_.main_pid) {
|
2019-05-09 02:34:05 +08:00
|
|
|
if (IsActivelyMonitoring()) {
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::OK, WEXITSTATUS(status));
|
2019-05-09 02:34:05 +08:00
|
|
|
} else {
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::SETUP_ERROR, Result::FAILED_MONITOR);
|
2019-05-09 02:34:05 +08:00
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
sandboxee_exited = true;
|
2019-05-09 02:34:05 +08:00
|
|
|
}
|
|
|
|
} else if (WIFSIGNALED(status)) {
|
2019-05-15 22:46:26 +08:00
|
|
|
// This usually does not happen, but might.
|
|
|
|
// Quote from the manual:
|
|
|
|
// A SIGKILL signal may still cause a PTRACE_EVENT_EXIT stop before
|
|
|
|
// actual signal death. This may be changed in the future;
|
2019-05-09 02:34:05 +08:00
|
|
|
VLOG(1) << "PID: " << ret << " terminated with signal: "
|
|
|
|
<< util::GetSignalName(WTERMSIG(status));
|
2023-01-23 17:41:42 +08:00
|
|
|
if (ret == process_.main_pid) {
|
2020-02-20 23:45:22 +08:00
|
|
|
if (network_violation_) {
|
|
|
|
SetExitStatusCode(Result::VIOLATION, Result::VIOLATION_NETWORK);
|
|
|
|
result_.SetNetworkViolation(network_proxy_server_->violation_msg_);
|
|
|
|
} else if (external_kill_) {
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::EXTERNAL_KILL, 0);
|
|
|
|
} else if (timed_out_) {
|
|
|
|
SetExitStatusCode(Result::TIMEOUT, 0);
|
|
|
|
} else {
|
|
|
|
SetExitStatusCode(Result::SIGNALED, WTERMSIG(status));
|
2019-05-09 02:34:05 +08:00
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
sandboxee_exited = true;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-05-09 02:34:05 +08:00
|
|
|
} else if (WIFSTOPPED(status)) {
|
|
|
|
VLOG(2) << "PID: " << ret
|
|
|
|
<< " received signal: " << util::GetSignalName(WSTOPSIG(status))
|
2021-04-16 03:01:42 +08:00
|
|
|
<< " with event: "
|
|
|
|
<< util::GetPtraceEventName(__WPTRACEEVENT(status));
|
2019-05-09 02:34:05 +08:00
|
|
|
StateProcessStopped(ret, status);
|
|
|
|
} else if (WIFCONTINUED(status)) {
|
|
|
|
VLOG(2) << "PID: " << ret << " is being continued";
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
}
|
2023-01-18 17:44:18 +08:00
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
if (!sandboxee_exited) {
|
2023-01-18 17:44:18 +08:00
|
|
|
const bool log_stack_traces =
|
|
|
|
result_.final_status() != Result::OK &&
|
|
|
|
absl::GetFlag(FLAGS_sandbox2_log_all_stack_traces);
|
|
|
|
if (!log_stack_traces) {
|
|
|
|
// Try to make sure main pid is killed and reaped
|
2023-01-23 17:41:42 +08:00
|
|
|
kill(process_.main_pid, SIGKILL);
|
2023-01-18 17:44:18 +08:00
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
constexpr auto kGracefullExitTimeout = absl::Milliseconds(200);
|
|
|
|
auto deadline = absl::Now() + kGracefullExitTimeout;
|
2023-01-18 17:44:18 +08:00
|
|
|
if (log_stack_traces) {
|
|
|
|
deadline = absl::Now() +
|
|
|
|
absl::GetFlag(FLAGS_sandbox2_stack_traces_collection_timeout);
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
for (;;) {
|
|
|
|
auto left = deadline - absl::Now();
|
|
|
|
if (absl::Now() >= deadline) {
|
|
|
|
LOG(INFO) << "Waiting for sandboxee exit timed out";
|
|
|
|
break;
|
|
|
|
}
|
2022-08-09 23:27:28 +08:00
|
|
|
pid_t ret = pid_waiter.Wait(&status);
|
2019-05-15 22:46:26 +08:00
|
|
|
if (ret == -1) {
|
2023-01-18 17:44:18 +08:00
|
|
|
if (!log_stack_traces || ret != ECHILD) {
|
|
|
|
PLOG(ERROR) << "waitpid() failed";
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
break;
|
|
|
|
}
|
2023-01-23 17:41:42 +08:00
|
|
|
if (!log_stack_traces && ret == process_.main_pid &&
|
2023-01-18 17:44:18 +08:00
|
|
|
(WIFSIGNALED(status) || WIFEXITED(status))) {
|
2019-05-15 22:46:26 +08:00
|
|
|
break;
|
|
|
|
}
|
2023-01-18 17:44:18 +08:00
|
|
|
|
2019-05-15 22:46:26 +08:00
|
|
|
if (ret == 0) {
|
|
|
|
auto ts = absl::ToTimespec(left);
|
2023-01-23 17:41:42 +08:00
|
|
|
sigtimedwait(&sset_, nullptr, &ts);
|
2023-01-18 17:44:18 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (WIFSTOPPED(status)) {
|
|
|
|
if (log_stack_traces) {
|
|
|
|
LogStackTraceOfPid(ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (__WPTRACEEVENT(status) == PTRACE_EVENT_EXIT) {
|
|
|
|
VLOG(2) << "PID: " << ret << " PTRACE_EVENT_EXIT ";
|
|
|
|
ContinueProcess(ret, 0);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!log_stack_traces) {
|
2023-01-23 17:41:42 +08:00
|
|
|
kill(process_.main_pid, SIGKILL);
|
2019-05-15 22:46:26 +08:00
|
|
|
}
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
void PtraceMonitor::LogStackTraceOfPid(pid_t pid) {
|
2023-01-18 17:44:18 +08:00
|
|
|
if (!StackTraceCollectionPossible()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
Regs regs(pid);
|
|
|
|
if (auto status = regs.Fetch(); !status.ok()) {
|
|
|
|
LOG(ERROR) << "Failed to get regs, PID:" << pid << " status:" << status;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (auto stack_trace = GetAndLogStackTrace(®s); !stack_trace.ok()) {
|
|
|
|
LOG(ERROR) << "Failed to get stack trace, PID:" << pid
|
|
|
|
<< " status:" << stack_trace.status();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
bool PtraceMonitor::InitSetupSignals() {
|
|
|
|
if (sigemptyset(&sset_) == -1) {
|
2019-05-15 22:46:26 +08:00
|
|
|
PLOG(ERROR) << "sigemptyset()";
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// sigtimedwait will react (wake-up) to arrival of this signal.
|
2023-01-23 17:41:42 +08:00
|
|
|
if (sigaddset(&sset_, SIGCHLD) == -1) {
|
2019-05-15 22:46:26 +08:00
|
|
|
PLOG(ERROR) << "sigaddset(SIGCHLD)";
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
2019-05-15 22:46:26 +08:00
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
if (pthread_sigmask(SIG_BLOCK, &sset_, nullptr) == -1) {
|
2019-05-15 22:46:26 +08:00
|
|
|
PLOG(ERROR) << "pthread_sigmask(SIG_BLOCK, SIGCHLD)";
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
bool PtraceMonitor::InitPtraceAttach() {
|
|
|
|
if (process_.init_pid > 0) {
|
|
|
|
if (ptrace(PTRACE_SEIZE, process_.init_pid, 0, PTRACE_O_EXITKILL) != 0) {
|
|
|
|
if (errno != ESRCH) {
|
|
|
|
PLOG(ERROR) << "attaching to init process failed";
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// Get a list of tasks.
|
2021-09-10 18:34:21 +08:00
|
|
|
absl::flat_hash_set<int> tasks;
|
2023-01-23 17:41:42 +08:00
|
|
|
if (auto task_list = sanitizer::GetListOfTasks(process_.main_pid);
|
|
|
|
task_list.ok()) {
|
2021-09-10 18:34:21 +08:00
|
|
|
tasks = *std::move(task_list);
|
|
|
|
} else {
|
|
|
|
LOG(ERROR) << "Could not get list of tasks: "
|
|
|
|
<< task_list.status().message();
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
if (tasks.find(process_.main_pid) == tasks.end()) {
|
|
|
|
LOG(ERROR) << "The pid " << process_.main_pid
|
|
|
|
<< " was not found in its own tasklist.";
|
2019-12-19 00:23:31 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// With TSYNC, we can allow threads: seccomp applies to all threads.
|
|
|
|
if (tasks.size() > 1) {
|
2023-01-23 17:41:42 +08:00
|
|
|
LOG(WARNING) << "PID " << process_.main_pid << " has " << tasks.size()
|
|
|
|
<< " threads,"
|
2019-03-19 00:21:48 +08:00
|
|
|
<< " at the time of call to SandboxMeHere. If you are seeing"
|
|
|
|
<< " more sandbox violations than expected, this might be"
|
|
|
|
<< " the reason why"
|
|
|
|
<< ".";
|
|
|
|
}
|
|
|
|
|
2021-09-10 18:34:21 +08:00
|
|
|
absl::flat_hash_set<int> tasks_attached;
|
2019-12-19 00:23:31 +08:00
|
|
|
int retries = 0;
|
|
|
|
absl::Time deadline = absl::Now() + absl::Seconds(2);
|
|
|
|
|
|
|
|
// In some situations we allow ptrace to try again when it fails.
|
|
|
|
while (!tasks.empty()) {
|
2021-09-10 18:34:21 +08:00
|
|
|
absl::flat_hash_set<int> tasks_left;
|
2019-12-19 00:23:31 +08:00
|
|
|
for (int task : tasks) {
|
|
|
|
constexpr intptr_t options =
|
|
|
|
PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK |
|
|
|
|
PTRACE_O_TRACEVFORKDONE | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC |
|
|
|
|
PTRACE_O_TRACEEXIT | PTRACE_O_TRACESECCOMP | PTRACE_O_EXITKILL;
|
|
|
|
int ret = ptrace(PTRACE_SEIZE, task, 0, options);
|
|
|
|
if (ret != 0) {
|
|
|
|
if (errno == EPERM) {
|
|
|
|
// Sometimes when a task is exiting we can get an EPERM from ptrace.
|
|
|
|
// Let's try again up until the timeout in this situation.
|
|
|
|
PLOG(WARNING) << "ptrace(PTRACE_SEIZE, " << task << ", "
|
|
|
|
<< absl::StrCat("0x", absl::Hex(options))
|
|
|
|
<< "), trying again...";
|
|
|
|
tasks_left.insert(task);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (errno == ESRCH) {
|
|
|
|
// A task may have exited since we captured the task list, we will
|
|
|
|
// allow things to continue after we log a warning.
|
|
|
|
PLOG(WARNING)
|
|
|
|
<< "ptrace(PTRACE_SEIZE, " << task << ", "
|
|
|
|
<< absl::StrCat("0x", absl::Hex(options))
|
|
|
|
<< ") skipping exited task. Continuing with other tasks.";
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// Any other errno will be considered a failure.
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_SEIZE, " << task << ", "
|
|
|
|
<< absl::StrCat("0x", absl::Hex(options)) << ") failed.";
|
|
|
|
return false;
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-12-19 00:23:31 +08:00
|
|
|
tasks_attached.insert(task);
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-12-19 00:23:31 +08:00
|
|
|
if (!tasks_left.empty()) {
|
|
|
|
if (absl::Now() < deadline) {
|
|
|
|
LOG(ERROR) << "Attaching to sandboxee timed out: could not attach to "
|
|
|
|
<< tasks_left.size() << " tasks";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// Exponential Backoff.
|
|
|
|
constexpr absl::Duration kInitialRetry = absl::Milliseconds(1);
|
|
|
|
constexpr absl::Duration kMaxRetry = absl::Milliseconds(20);
|
|
|
|
const absl::Duration retry_interval =
|
|
|
|
kInitialRetry * (1 << std::min(10, retries++));
|
|
|
|
absl::SleepFor(
|
|
|
|
std::min({retry_interval, kMaxRetry, deadline - absl::Now()}));
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
2019-12-19 00:23:31 +08:00
|
|
|
tasks = std::move(tasks_left);
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get a list of tasks after attaching.
|
2023-01-23 17:41:42 +08:00
|
|
|
if (auto tasks_list = sanitizer::GetListOfTasks(process_.main_pid);
|
|
|
|
tasks_list.ok()) {
|
2021-09-10 18:34:21 +08:00
|
|
|
tasks = *std::move(tasks_list);
|
|
|
|
} else {
|
|
|
|
LOG(ERROR) << "Could not get list of tasks: "
|
|
|
|
<< tasks_list.status().message();
|
2019-03-19 00:21:48 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-12-19 00:23:31 +08:00
|
|
|
// Check that we attached to all the threads
|
|
|
|
if (tasks_attached != tasks) {
|
2023-01-23 17:41:42 +08:00
|
|
|
LOG(ERROR) << "The pid " << process_.main_pid
|
2019-03-19 00:21:48 +08:00
|
|
|
<< " spawned new threads while we were trying to attach to it.";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// No glibc wrapper for gettid - see 'man gettid'.
|
|
|
|
VLOG(1) << "Monitor (PID: " << getpid()
|
|
|
|
<< ", TID: " << util::Syscall(__NR_gettid)
|
2023-01-23 17:41:42 +08:00
|
|
|
<< ") attached to PID: " << process_.main_pid;
|
2019-03-19 00:21:48 +08:00
|
|
|
|
|
|
|
// Technically, the sandboxee can be in a ptrace-stopped state right now,
|
|
|
|
// because some signal could have arrived in the meantime. Yet, this
|
|
|
|
// Comms::SendUint32 call shouldn't lock our process, because the underlying
|
|
|
|
// socketpair() channel is buffered, hence it will accept the uint32_t message
|
|
|
|
// no matter what is the current state of the sandboxee, and it will allow for
|
|
|
|
// our process to continue and unlock the sandboxee with the proper ptrace
|
|
|
|
// event handling.
|
|
|
|
if (!comms_->SendUint32(Client::kSandbox2ClientDone)) {
|
|
|
|
LOG(ERROR) << "Couldn't send Client::kSandbox2ClientDone message";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Decides what to do with a traced syscall: allow it, let it complete for
// later inspection, log-and-allow it, or treat it as a policy violation.
void PtraceMonitor::ActionProcessSyscall(Regs* regs, const Syscall& syscall) {
  const pid_t pid = regs->pid();

  // If the sandboxing is not enabled yet, allow the first __NR_execveat.
  if (syscall.nr() == __NR_execveat && !IsActivelyMonitoring()) {
    VLOG(1) << "[PERMITTED/BEFORE_EXECVEAT]: "
            << "SYSCALL ::: PID: " << pid << ", PROG: '"
            << util::GetProgName(pid) << "' : " << syscall.GetDescription();
    ContinueProcess(pid, 0);
    return;
  }

  // The Notify object can decide whether to allow this syscall. This is useful
  // for sandbox setups in which some syscalls might still need some logging,
  // but nonetheless be allowed ('permissible syscalls' in sandbox v1).
  switch (notify_->EventSyscallTrace(syscall)) {
    case Notify::TraceAction::kAllow:
      ContinueProcess(pid, 0);
      return;
    case Notify::TraceAction::kInspectAfterReturn:
      // Note that a process might die without an exit-stop before the syscall
      // is completed (eg. a thread calls execve() and the thread group leader
      // dies), so the entry is removed when the process exits.
      syscalls_in_progress_[pid] = syscall;
      CompleteSyscall(pid, 0);
      return;
    default:
      break;
  }

  // TODO(wiktorg): Further clean that up, probably while doing monitor cleanup
  // log_file_ not null iff FLAGS_sandbox2_danger_danger_permit_all_and_log is
  // set.
  if (log_file_) {
    const std::string description = syscall.GetDescription();
    PCHECK(absl::FPrintF(log_file_, "PID: %d %s\n", pid, description) >= 0);
    ContinueProcess(pid, 0);
    return;
  }

  if (absl::GetFlag(FLAGS_sandbox2_danger_danger_permit_all)) {
    ContinueProcess(pid, 0);
    return;
  }

  ActionProcessSyscallViolation(regs, syscall, kSyscallViolation);
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Records a policy violation: logs it, notifies the client, stores the
// offending syscall and register state in the result, and neutralizes the
// syscall in the sandboxee.
void PtraceMonitor::ActionProcessSyscallViolation(
    Regs* regs, const Syscall& syscall, ViolationType violation_type) {
  LogSyscallViolation(syscall);
  notify_->EventSyscallViolation(syscall, violation_type);
  SetExitStatusCode(Result::VIOLATION, syscall.nr());
  result_.SetSyscall(std::make_unique<Syscall>(syscall));
  SetAdditionalResultInfo(std::make_unique<Regs>(*regs));
  // Rewrite the syscall argument to something invalid (-1).
  // The process will be killed anyway so this is just a precaution.
  if (const auto skip_status = regs->SkipSyscallReturnValue(-ENOSYS);
      !skip_status.ok()) {
    LOG(ERROR) << skip_status;
  }
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
void PtraceMonitor::EventPtraceSeccomp(pid_t pid, int event_msg) {
|
2022-08-31 23:09:05 +08:00
|
|
|
if (event_msg < sapi::cpu::Architecture::kUnknown ||
|
|
|
|
event_msg > sapi::cpu::Architecture::kMax) {
|
|
|
|
// We've observed that, if the process has exited, the event_msg may contain
|
|
|
|
// the exit status even though we haven't received the exit event yet.
|
|
|
|
// To work around this, if the event msg is not in the range of the known
|
|
|
|
// architectures, we assume that it's an exit status. We deal with it by
|
|
|
|
// ignoring this event, and we'll get the exit event in the next iteration.
|
|
|
|
LOG(WARNING) << "received event_msg for unknown architecture: " << event_msg
|
|
|
|
<< "; the program may have exited";
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
// If the seccomp-policy is using RET_TRACE, we request that it returns the
|
|
|
|
// syscall architecture identifier in the SECCOMP_RET_DATA.
|
2021-01-14 01:25:25 +08:00
|
|
|
const auto syscall_arch = static_cast<sapi::cpu::Architecture>(event_msg);
|
2019-03-19 00:21:48 +08:00
|
|
|
Regs regs(pid);
|
|
|
|
auto status = regs.Fetch();
|
|
|
|
if (!status.ok()) {
|
2022-05-09 21:57:52 +08:00
|
|
|
// Ignore if process is killed in the meanwhile
|
|
|
|
if (absl::IsNotFound(status)) {
|
|
|
|
LOG(WARNING) << "failed to fetch regs: " << status;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
LOG(ERROR) << "failed to fetch regs: " << status;
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_FETCH);
|
2019-03-19 00:21:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
Syscall syscall = regs.ToSyscall(syscall_arch);
|
|
|
|
// If the architecture of the syscall used is different that the current host
|
|
|
|
// architecture, report a violation.
|
|
|
|
if (syscall_arch != Syscall::GetHostArch()) {
|
|
|
|
ActionProcessSyscallViolation(®s, syscall, kArchitectureSwitchViolation);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ActionProcessSyscall(®s, syscall);
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Handles a syscall-exit-stop (PTRACE_O_TRACESYSGOOD) for `pid`: reports the
// syscall's return value to the embedder for a previously-recorded
// syscall-in-progress, then resumes the process.
void PtraceMonitor::EventSyscallExit(pid_t pid) {
  // Check that the monitor wants to inspect the current syscall's return value.
  auto index = syscalls_in_progress_.find(pid);
  if (index == syscalls_in_progress_.end()) {
    // Exit-stops only arrive for syscalls we asked to single-step through, so
    // a missing entry indicates inconsistent monitor state.
    LOG(ERROR) << "Expected a syscall in progress in PID " << pid;
    SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INSPECT);
    return;
  }
  Regs regs(pid);
  auto status = regs.Fetch();
  if (!status.ok()) {
    // Ignore if process is killed in the meanwhile
    if (absl::IsNotFound(status)) {
      LOG(WARNING) << "failed to fetch regs: " << status;
      return;
    }
    LOG(ERROR) << "failed to fetch regs: " << status;
    SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_FETCH);
    return;
  }
  // Read the return value for the host architecture; the tracee cannot have
  // switched architectures here without tripping a violation earlier.
  int64_t return_value = regs.GetReturnValue(sapi::host_cpu::Architecture());
  notify_->EventSyscallReturn(index->second, return_value);
  syscalls_in_progress_.erase(index);
  ContinueProcess(pid, 0);
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Handles PTRACE_EVENT_FORK/VFORK/CLONE for `pid`. `event_msg` is the new
// child's PID (per ptrace(2) for these events).
void PtraceMonitor::EventPtraceNewProcess(pid_t pid, int event_msg) {
  // ptrace doesn't issue syscall-exit-stops for successful fork/vfork/clone
  // system calls. Check if the monitor wanted to inspect the syscall's return
  // value, and call EventSyscallReturn for the parent process if so.
  auto index = syscalls_in_progress_.find(pid);
  if (index != syscalls_in_progress_.end()) {
    auto syscall_nr = index->second.nr();
    bool creating_new_process = syscall_nr == __NR_clone;
    // clone3/fork/vfork are not defined on all architectures, hence the
    // conditional checks below.
#ifdef __NR_clone3
    creating_new_process = creating_new_process || syscall_nr == __NR_clone3;
#endif
#ifdef __NR_fork
    creating_new_process = creating_new_process || syscall_nr == __NR_fork;
#endif
#ifdef __NR_vfork
    creating_new_process = creating_new_process || syscall_nr == __NR_vfork;
#endif
    if (!creating_new_process) {
      // A process-creation event while a non-creation syscall is in flight
      // means the monitor's bookkeeping is inconsistent.
      LOG(ERROR) << "Expected a fork/vfork/clone syscall in progress in PID "
                 << pid << "; actual: " << index->second.GetDescription();
      SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INSPECT);
      return;
    }
    // The child PID (event_msg) is the syscall's return value in the parent.
    notify_->EventSyscallReturn(index->second, event_msg);
    syscalls_in_progress_.erase(index);
  }
  ContinueProcess(pid, 0);
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Handles PTRACE_EVENT_EXEC. The first exec observed marks the point where
// the sandbox policy takes full effect (active monitoring); later execs only
// need their in-progress syscall bookkeeping resolved.
void PtraceMonitor::EventPtraceExec(pid_t pid, int event_msg) {
  if (!IsActivelyMonitoring()) {
    VLOG(1) << "PTRACE_EVENT_EXEC seen from PID: " << event_msg
            << ". SANDBOX ENABLED!";
    SetActivelyMonitoring();
  } else {
    // ptrace doesn't issue syscall-exit-stops for successful execve/execveat
    // system calls. Check if the monitor wanted to inspect the syscall's return
    // value, and call EventSyscallReturn if so.
    auto index = syscalls_in_progress_.find(pid);
    if (index != syscalls_in_progress_.end()) {
      auto syscall_nr = index->second.nr();
      if (syscall_nr != __NR_execve && syscall_nr != __NR_execveat) {
        LOG(ERROR) << "Expected an execve/execveat syscall in progress in PID "
                   << pid << "; actual: " << index->second.GetDescription();
        SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_INSPECT);
        return;
      }
      // A successful exec returns 0 in the (replaced) process image.
      notify_->EventSyscallReturn(index->second, 0);
      syscalls_in_progress_.erase(index);
    }
  }
  ContinueProcess(pid, 0);
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
// Handles PTRACE_EVENT_EXIT. `event_msg` is the process's wait status (per
// ptrace(2)); depending on how and which process is exiting, this records the
// final sandbox result, reports a late seccomp violation, and/or logs stack
// traces before letting the process finish dying.
void PtraceMonitor::EventPtraceExit(pid_t pid, int event_msg) {
  // Forget about any syscalls in progress for this PID.
  syscalls_in_progress_.erase(pid);

  // A regular exit, let it continue (fast-path).
  if (ABSL_PREDICT_TRUE(WIFEXITED(event_msg) &&
                        (!policy_->collect_stacktrace_on_exit_ ||
                         pid != process_.main_pid))) {
    ContinueProcess(pid, 0);
    return;
  }

  // SIGSYS termination indicates the seccomp policy killed the process.
  const bool is_seccomp =
      WIFSIGNALED(event_msg) && WTERMSIG(event_msg) == SIGSYS;
  const bool log_stack_trace =
      absl::GetFlag(FLAGS_sandbox2_log_all_stack_traces);
  // Fetch the registers as we'll need them to fill the result in any case
  auto regs = std::make_unique<Regs>(pid);
  if (is_seccomp || pid == process_.main_pid || log_stack_trace) {
    auto status = regs->Fetch();
    if (!status.ok()) {
      LOG(ERROR) << "failed to fetch regs: " << status;
      SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_FETCH);
      return;
    }
  }

  // Process signaled due to seccomp violation.
  if (is_seccomp) {
    VLOG(1) << "PID: " << pid << " violation uncovered via the EXIT_EVENT";
    ActionProcessSyscallViolation(
        regs.get(), regs->ToSyscall(Syscall::GetHostArch()), kSyscallViolation);
    return;
  }

  // This can be reached in four cases:
  // 1) Process was killed from the sandbox.
  // 2) Process was killed because it hit a timeout.
  // 3) Regular signal/other exit cause.
  // 4) Normal exit for which we want to obtain stack trace.
  if (pid == process_.main_pid) {
    VLOG(1) << "PID: " << pid << " main special exit";
    // Precedence: network violation > external kill > timeout > exit status >
    // terminating signal. Changing this order changes the reported result.
    if (network_violation_) {
      SetExitStatusCode(Result::VIOLATION, Result::VIOLATION_NETWORK);
      result_.SetNetworkViolation(network_proxy_server_->violation_msg_);
    } else if (external_kill_) {
      SetExitStatusCode(Result::EXTERNAL_KILL, 0);
    } else if (timed_out_) {
      SetExitStatusCode(Result::TIMEOUT, 0);
    } else if (WIFEXITED(event_msg)) {
      SetExitStatusCode(Result::OK, WEXITSTATUS(event_msg));
    } else {
      SetExitStatusCode(Result::SIGNALED, WTERMSIG(event_msg));
    }
    SetAdditionalResultInfo(std::move(regs));
  } else if (log_stack_trace) {
    // In case pid == pid_ the stack trace will be logged anyway. So we need
    // to do explicit logging only when this is not a main PID.
    if (StackTraceCollectionPossible()) {
      if (auto stack_trace = GetAndLogStackTrace(regs.get());
          !stack_trace.ok()) {
        LOG(ERROR) << "Failed to get stack trace, PID:" << pid
                   << " status:" << stack_trace.status();
      }
    }
  }
  VLOG(1) << "Continuing";
  ContinueProcess(pid, 0);
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
void PtraceMonitor::EventPtraceStop(pid_t pid, int stopsig) {
|
2019-03-19 00:21:48 +08:00
|
|
|
// It's not a real stop signal. For example PTRACE_O_TRACECLONE and similar
|
|
|
|
// flags to ptrace(PTRACE_SEIZE) might generate this event with SIGTRAP.
|
|
|
|
if (stopsig != SIGSTOP && stopsig != SIGTSTP && stopsig != SIGTTIN &&
|
|
|
|
stopsig != SIGTTOU) {
|
2019-05-15 22:46:26 +08:00
|
|
|
ContinueProcess(pid, 0);
|
2019-03-19 00:21:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
// It's our PID stop signal. Stop it.
|
|
|
|
VLOG(2) << "PID: " << pid << " stopped due to "
|
|
|
|
<< util::GetSignalName(stopsig);
|
2019-05-15 22:46:26 +08:00
|
|
|
StopProcess(pid, 0);
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
void PtraceMonitor::StateProcessStopped(pid_t pid, int status) {
|
2019-03-19 00:21:48 +08:00
|
|
|
int stopsig = WSTOPSIG(status);
|
2022-01-13 22:48:44 +08:00
|
|
|
// We use PTRACE_O_TRACESYSGOOD, so we can tell it's a syscall stop without
|
|
|
|
// calling PTRACE_GETSIGINFO by checking the value of the reported signal.
|
|
|
|
bool is_syscall_exit = stopsig == (SIGTRAP | 0x80);
|
|
|
|
if (__WPTRACEEVENT(status) == 0 && !is_syscall_exit) {
|
2019-03-19 00:21:48 +08:00
|
|
|
// Must be a regular signal delivery.
|
|
|
|
VLOG(2) << "PID: " << pid
|
|
|
|
<< " received signal: " << util::GetSignalName(stopsig);
|
|
|
|
notify_->EventSignal(pid, stopsig);
|
2019-05-15 22:46:26 +08:00
|
|
|
ContinueProcess(pid, stopsig);
|
2019-03-19 00:21:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned long event_msg; // NOLINT
|
|
|
|
if (ptrace(PTRACE_GETEVENTMSG, pid, 0, &event_msg) == -1) {
|
|
|
|
if (errno == ESRCH) {
|
|
|
|
// This happens from time to time, the kernel does not guarantee us that
|
|
|
|
// we get the event in time.
|
|
|
|
PLOG(INFO) << "ptrace(PTRACE_GETEVENTMSG, " << pid << ")";
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
PLOG(ERROR) << "ptrace(PTRACE_GETEVENTMSG, " << pid << ")";
|
2019-05-15 22:46:26 +08:00
|
|
|
SetExitStatusCode(Result::INTERNAL_ERROR, Result::FAILED_GETEVENT);
|
2019-03-19 00:21:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:41:42 +08:00
|
|
|
if (ABSL_PREDICT_FALSE(pid == process_.main_pid && should_dump_stack_ &&
|
2019-05-15 22:46:26 +08:00
|
|
|
executor_->libunwind_sbox_for_pid_ == 0 &&
|
|
|
|
policy_->GetNamespace())) {
|
2021-06-28 17:02:35 +08:00
|
|
|
auto stack_trace = [this,
|
|
|
|
pid]() -> absl::StatusOr<std::vector<std::string>> {
|
|
|
|
Regs regs(pid);
|
|
|
|
SAPI_RETURN_IF_ERROR(regs.Fetch());
|
2023-02-16 21:06:50 +08:00
|
|
|
return GetStackTrace(®s);
|
2021-06-28 17:02:35 +08:00
|
|
|
}();
|
|
|
|
|
|
|
|
if (!stack_trace.ok()) {
|
|
|
|
LOG(WARNING) << "FAILED TO GET SANDBOX STACK : " << stack_trace.status();
|
2022-10-20 21:48:06 +08:00
|
|
|
} else if (SAPI_VLOG_IS_ON(0)) {
|
2020-07-20 15:24:12 +08:00
|
|
|
VLOG(0) << "SANDBOX STACK: PID: " << pid << ", [";
|
2021-06-28 17:02:35 +08:00
|
|
|
for (const auto& frame : *stack_trace) {
|
2020-07-20 15:24:12 +08:00
|
|
|
VLOG(0) << " " << frame;
|
|
|
|
}
|
|
|
|
VLOG(0) << "]";
|
2019-03-19 00:21:48 +08:00
|
|
|
}
|
|
|
|
should_dump_stack_ = false;
|
|
|
|
}
|
2019-03-19 18:40:51 +08:00
|
|
|
|
2021-05-21 17:59:06 +08:00
|
|
|
#ifndef PTRACE_EVENT_STOP
|
2019-03-19 00:21:48 +08:00
|
|
|
#define PTRACE_EVENT_STOP 128
|
|
|
|
#endif
|
|
|
|
|
2022-01-13 22:48:44 +08:00
|
|
|
if (is_syscall_exit) {
|
|
|
|
VLOG(2) << "PID: " << pid << " syscall-exit-stop: " << event_msg;
|
|
|
|
EventSyscallExit(pid);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-03-19 00:21:48 +08:00
|
|
|
switch (__WPTRACEEVENT(status)) {
|
|
|
|
case PTRACE_EVENT_FORK:
|
2022-01-13 22:48:44 +08:00
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_FORK, PID: " << event_msg;
|
|
|
|
EventPtraceNewProcess(pid, event_msg);
|
|
|
|
break;
|
2019-03-19 00:21:48 +08:00
|
|
|
case PTRACE_EVENT_VFORK:
|
2022-01-13 22:48:44 +08:00
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_VFORK, PID: " << event_msg;
|
|
|
|
EventPtraceNewProcess(pid, event_msg);
|
|
|
|
break;
|
2019-03-19 00:21:48 +08:00
|
|
|
case PTRACE_EVENT_CLONE:
|
2022-01-13 22:48:44 +08:00
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_CLONE, PID: " << event_msg;
|
|
|
|
EventPtraceNewProcess(pid, event_msg);
|
|
|
|
break;
|
2019-03-19 00:21:48 +08:00
|
|
|
case PTRACE_EVENT_VFORK_DONE:
|
2019-05-15 22:46:26 +08:00
|
|
|
ContinueProcess(pid, 0);
|
2019-03-19 00:21:48 +08:00
|
|
|
break;
|
|
|
|
case PTRACE_EVENT_EXEC:
|
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_EXEC, PID: " << event_msg;
|
|
|
|
EventPtraceExec(pid, event_msg);
|
|
|
|
break;
|
|
|
|
case PTRACE_EVENT_EXIT:
|
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_EXIT: " << event_msg;
|
|
|
|
EventPtraceExit(pid, event_msg);
|
|
|
|
break;
|
|
|
|
case PTRACE_EVENT_STOP:
|
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_STOP: " << event_msg;
|
|
|
|
EventPtraceStop(pid, stopsig);
|
|
|
|
break;
|
|
|
|
case PTRACE_EVENT_SECCOMP:
|
|
|
|
VLOG(2) << "PID: " << pid << " PTRACE_EVENT_SECCOMP: " << event_msg;
|
|
|
|
EventPtraceSeccomp(pid, event_msg);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG(ERROR) << "Unknown ptrace event: " << __WPTRACEEVENT(status)
|
|
|
|
<< " with data: " << event_msg;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace sandbox2
|