Merge pull request #62 from andreimedar:libarchive

PiperOrigin-RevId: 336042102
Change-Id: I7b1ceaa794851c10e07dbdef4f4e37000edc25d4
Copybara-Service 2020-10-08 02:06:50 -07:00
commit 33bc36ae3d
13 changed files with 1444 additions and 0 deletions


@@ -0,0 +1,3 @@
build/
.cache
.vscode


@@ -0,0 +1,61 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.16)
project(libarchive_sapi CXX)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Build SAPI library
# Path to the Sandboxed API source tree; override with -DSAPI_ROOT=<path>.
set(SAPI_ROOT "" CACHE PATH "Path to the Sandboxed API source tree")
include(FetchContent)
FetchContent_Declare(
libarchive
GIT_REPOSITORY https://github.com/libarchive/libarchive
PATCH_COMMAND cd libarchive && patch < ${CMAKE_SOURCE_DIR}/patches/header.patch && patch < ${CMAKE_SOURCE_DIR}/patches/archive_virtual.patch
)
FetchContent_MakeAvailable(libarchive)
add_subdirectory("${SAPI_ROOT}"
"${CMAKE_BINARY_DIR}/sandboxed-api-build"
EXCLUDE_FROM_ALL
)
file(STRINGS functions_to_sandbox.txt FUNCTIONS_LIST)
add_sapi_library(
libarchive_sapi
FUNCTIONS ${FUNCTIONS_LIST}
INPUTS
${CMAKE_BINARY_DIR}/_deps/libarchive-src/libarchive/archive.h
${CMAKE_BINARY_DIR}/_deps/libarchive-src/libarchive/archive_entry.h
LIBRARY archive_static
LIBRARY_NAME Libarchive
NAMESPACE ""
)
target_include_directories(libarchive_sapi INTERFACE
"${PROJECT_BINARY_DIR}" # To find the generated SAPI header
)
add_subdirectory(examples)
add_subdirectory(test)


@@ -0,0 +1,48 @@
# libarchive Sandboxed API
Sandboxed version of the [libarchive](https://www.libarchive.org/) minitar [example](https://github.com/libarchive/libarchive/blob/master/examples/minitar/minitar.c) using [Sandboxed API](https://github.com/google/sandboxed-api).
## Build
```
mkdir -p build && cd build
cmake .. -G Ninja
cmake --build .
```
The example binary file can be found at **build/examples/sapi_minitar** and the unit tests at **build/test/sapi_minitar_test**.
## Patches
The original libarchive code required patching because one of its custom types (`la_ssize_t`) produced errors with the libclang Python bindings used to generate the SAPI headers. The patches are applied automatically during the build step and do not modify the functionality of the library. The repository is also fetched automatically.
## Examples
In this project, the minitar example is sandboxed.
The code is found in the **examples** directory and is structured as follows:
- **sapi_minitar_main.cc** - ***main*** function of the minitar tool. This is mostly similar to the original example.
- **sapi_minitar.h** and **sapi_minitar.cc** - The two main functions (***CreateArchive*** and ***ExtractArchive***) and other helper functions.
- **sandbox.h** - Custom security policies, depending on whether the user creates or extracts an archive.
On top of that, unit tests can be found in the **test/minitar_test.cc** file.
## Usage
The unit tests can be executed with `./build/test/sapi_minitar_test`.
The **sapi_minitar** command line tool can be used in the same way as the original example. It is also similar to the [tar](https://man7.org/linux/man-pages/man1/tar.1.html) command, only with fewer options:
`./build/examples/sapi_minitar -[options] [-f file] [files]`
The available options are:
- *c* - Create archive.
- *x* - Extract archive.
- *t* - List the entries of the archive without extracting.
- *p* - Preserve permissions, ACLs and file flags when extracting.
- *v* - Verbose.
- *j* or *y* - Compress with BZIP2.
- *Z* - Compress with the legacy UNIX compress filter.
- *z* - Compress with GZIP.
If no compression method is chosen when creating an archive, the files are only stored (no compression is applied).
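For example (the file and directory names below are just placeholders), an archive can be created with GZIP compression and then extracted verbosely:
```
./build/examples/sapi_minitar -czf archive.tar.gz file1 some_dir
./build/examples/sapi_minitar -xvf archive.tar.gz
```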


@@ -0,0 +1,43 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
add_library(sapi_minitar_lib STATIC
sapi_minitar.cc
sapi_minitar.h
sandbox.h
)
target_link_libraries(sapi_minitar_lib PUBLIC
glog::glog
libarchive_sapi
sandbox2::executor
sandbox2::fileops
sandbox2::file_base
sandbox2::util
sandbox2::temp_file
sapi::sapi
)
target_include_directories(sapi_minitar_lib INTERFACE
"${PROJECT_SOURCE_DIR}/examples"
)
add_executable(sapi_minitar
sapi_minitar_main.cc
)
target_link_libraries(sapi_minitar PRIVATE
sapi_minitar_lib
)


@@ -0,0 +1,147 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SAPI_LIBARCHIVE_EXAMPLES_SANDBOX_H
#define SAPI_LIBARCHIVE_EXAMPLES_SANDBOX_H
#include <asm/unistd_64.h>
#include <linux/fs.h>
#include <sys/stat.h>
#include "libarchive_sapi.sapi.h" // NOLINT(build/include)
#include "sandboxed_api/sandbox2/util/bpf_helper.h"
#include "sandboxed_api/sandbox2/util/fileops.h"
// When creating an archive, we need read permissions on each file/directory
// added to the archive. To create the archive itself, the directory that will
// contain it is mapped to "/output" and the file is created as
// "/output/<archive basename>". This way, the program can create the archive
// without having access to anything else.
class SapiLibarchiveSandboxCreate : public LibarchiveSandbox {
public:
SapiLibarchiveSandboxCreate(const std::vector<std::string>& files,
absl::string_view archive_path)
: files_(files), archive_path_(archive_path) {}
private:
std::unique_ptr<sandbox2::Policy> ModifyPolicy(
sandbox2::PolicyBuilder*) override {
sandbox2::PolicyBuilder policy =
sandbox2::PolicyBuilder()
.AddDirectoryAt(archive_path_, "/output", false)
.AllowRead()
.AllowWrite()
.AllowOpen()
.AllowSystemMalloc()
.AllowGetIDs()
.AllowSafeFcntl()
.AllowStat()
.AllowExit()
.AllowSyscalls({
__NR_futex,
__NR_lseek,
__NR_close,
__NR_gettid,
__NR_umask,
__NR_utimensat,
__NR_unlink,
__NR_mkdir,
__NR_fstatfs,
__NR_socket,
__NR_connect,
__NR_flistxattr,
__NR_recvmsg,
__NR_getdents64,
})
// Allow ioctl only for FS_IOC_GETFLAGS.
.AddPolicyOnSyscall(__NR_ioctl,
{ARG(1), JEQ(FS_IOC_GETFLAGS, ALLOW)});
// We check whether the entry is a file or a directory.
for (const auto& i : files_) {
struct stat s;
CHECK(stat(i.c_str(), &s) == 0) << "Could not stat " << i;
if (S_ISDIR(s.st_mode)) {
policy = policy.AddDirectory(i);
} else {
policy = policy.AddFile(i);
}
}
return policy.BuildOrDie();
}
const std::vector<std::string> files_;
absl::string_view archive_path_;
};
// When an archive is extracted, the generated files/directories are placed
// relative to the current working directory. To grant the needed permissions,
// we create a temporary directory at every extraction, change the working
// directory of the sandboxed process to it, and map the real current working
// directory onto it. This way the contents of the archive are placed
// correctly without granting any additional permissions.
class SapiLibarchiveSandboxExtract : public LibarchiveSandbox {
public:
SapiLibarchiveSandboxExtract(absl::string_view archive_path, int do_extract,
absl::string_view tmp_dir)
: archive_path_(archive_path),
do_extract_(do_extract),
tmp_dir_(tmp_dir) {}
private:
void ModifyExecutor(sandbox2::Executor* executor) override {
// If the user only wants to list the entries in the archive, we do
// not need to worry about changing directories.
if (do_extract_) {
executor->set_cwd(std::string(tmp_dir_));
}
}
std::unique_ptr<sandbox2::Policy> ModifyPolicy(
sandbox2::PolicyBuilder*) override {
sandbox2::PolicyBuilder policy = sandbox2::PolicyBuilder()
.AllowRead()
.AllowWrite()
.AllowOpen()
.AllowSystemMalloc()
.AllowGetIDs()
.AllowSafeFcntl()
.AllowStat()
.AllowExit()
.AllowSyscalls({
__NR_futex,
__NR_lseek,
__NR_close,
__NR_gettid,
__NR_umask,
__NR_utimensat,
__NR_unlink,
__NR_mkdir,
})
.AddFile(archive_path_);
if (do_extract_) {
// Get the real cwd and map it to the temporary directory in which
// the sandboxed process runs.
std::string cwd = sandbox2::file_util::fileops::GetCWD();
policy = policy.AddDirectoryAt(cwd, tmp_dir_, false);
}
return policy.BuildOrDie();
}
absl::string_view archive_path_;
absl::string_view tmp_dir_;
const int do_extract_;
};
#endif // SAPI_LIBARCHIVE_EXAMPLES_SANDBOX_H


@@ -0,0 +1,540 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sapi_minitar.h" // NOLINT(build/include)
#include "absl/status/status.h"
#include "sandboxed_api/sandbox2/util/path.h"
#include "sandboxed_api/util/status_macros.h"
absl::Status CreateArchive(const char* initial_filename, int compress,
const char** argv, bool verbose) {
// We split the filename path into dirname and filename. To the filename we
// prepend "/output/"" so that it will work with the security policy.
std::string abs_path = MakeAbsolutePathAtCWD(std::string(initial_filename));
auto [archive_path, filename_tmp] =
std::move(sandbox2::file::SplitPath(abs_path));
std::string filename = sandbox2::file::JoinPath("/output/", filename_tmp);
std::vector<std::string> absolute_paths;
sandbox2::util::CharPtrArrToVecString(const_cast<char* const*>(argv),
&absolute_paths);
std::vector<std::string> relative_paths = absolute_paths;
std::transform(absolute_paths.begin(), absolute_paths.end(),
absolute_paths.begin(), MakeAbsolutePathAtCWD);
std::transform(relative_paths.begin(), relative_paths.end(),
relative_paths.begin(), sandbox2::file::CleanPath);
// At this point, we have the relative and absolute paths (cleaned) saved
// in vectors.
// Initialize sandbox and api objects.
SapiLibarchiveSandboxCreate sandbox(absolute_paths, archive_path);
SAPI_RETURN_IF_ERROR(sandbox.Init());
LibarchiveApi api(&sandbox);
SAPI_ASSIGN_OR_RETURN(archive * ret_archive, api.archive_write_new());
if (ret_archive == nullptr) {
return absl::FailedPreconditionError("Failed to create write archive");
}
// Treat the pointer as remote. There is no need to copy the data
// to the client process.
sapi::v::RemotePtr a(ret_archive);
int rc;
std::string msg;
if (compress == 'j' || compress == 'y') {
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_add_filter_bzip2(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_add_filter_bzip2 call");
}
} else if (compress == 'Z') {
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_add_filter_compress(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_add_filter_compress call");
}
} else if (compress == 'z') {
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_add_filter_gzip(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_add_filter_gzip call");
}
} else {
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_add_filter_none(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_add_filter_none call");
}
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_set_format_ustar(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_set_format_ustar call");
}
const char* filename_ptr = filename.data();
if (filename_ptr != nullptr && strcmp(filename_ptr, "-") == 0) {
filename_ptr = nullptr;
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_open_filename(
&a, sapi::v::ConstCStr(filename_ptr).PtrBefore()));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_open_filename call");
}
// We can directly use the vectors defined before.
for (int file_idx = 0; file_idx < absolute_paths.size(); ++file_idx) {
SAPI_ASSIGN_OR_RETURN(ret_archive, api.archive_read_disk_new());
if (ret_archive == nullptr) {
return absl::FailedPreconditionError(
"Failed to create read_disk archive");
}
sapi::v::RemotePtr disk(ret_archive);
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_disk_set_standard_lookup(&disk));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_disk_set_standard_lookup call");
}
// We use the absolute path first.
SAPI_ASSIGN_OR_RETURN(
rc,
api.archive_read_disk_open(
&disk,
sapi::v::ConstCStr(absolute_paths[file_idx].c_str()).PtrBefore()));
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(msg, CheckStatusAndGetString(
api.archive_error_string(&disk), sandbox));
return absl::FailedPreconditionError(msg);
}
while (true) {
archive_entry* ret_archive_entry;
SAPI_ASSIGN_OR_RETURN(ret_archive_entry, api.archive_entry_new());
if (ret_archive_entry == nullptr) {
return absl::FailedPreconditionError("Failed to create archive_entry");
}
sapi::v::RemotePtr entry(ret_archive_entry);
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_next_header2(&disk, &entry));
if (rc == ARCHIVE_EOF) {
break;
}
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(msg, CheckStatusAndGetString(
api.archive_error_string(&disk), sandbox));
return absl::FailedPreconditionError(msg);
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_disk_descend(&disk));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError("read_disk_descend call failed");
}
// After using the absolute path before, we now need to add the pathname
// to the archive entry. This would help store the files by their relative
// paths (similar to the usual tar command).
// However, in the case where a directory is added to the archive,
// all of the files inside of it are added as well so we replace the
// absolute path prefix with the relative one.
// Example:
// we add the folder "test_files" which becomes
// "/absolute/path/test_files" and the files inside of it will become
// similar to "/absolute/path/test_files/file1"
// which we then change to "test_files/file1" so that it is relative.
SAPI_ASSIGN_OR_RETURN(
std::string path_name,
CheckStatusAndGetString(api.archive_entry_pathname(&entry), sandbox));
path_name.replace(path_name.begin(),
path_name.begin() + absolute_paths[file_idx].length(),
relative_paths[file_idx]);
// On top of those changes, we need to remove leading '/' characters
// and also remove everything up to the last occurrence of '../'.
size_t found = path_name.find_first_not_of("/");
if (found != std::string::npos) {
path_name.erase(path_name.begin(), path_name.begin() + found);
}
// Search either for the last '/../' or check if
// the path has '../' in the beginning.
found = path_name.rfind("/../");
if (found != std::string::npos) {
path_name = path_name.substr(found + 4);
} else if (path_name.substr(0, 3) == "../") {
path_name = path_name.substr(3);
}
SAPI_RETURN_IF_ERROR(api.archive_entry_set_pathname(
&entry, sapi::v::ConstCStr(path_name.c_str()).PtrBefore()));
if (verbose) {
SAPI_ASSIGN_OR_RETURN(msg, CheckStatusAndGetString(
api.archive_entry_pathname(&entry), sandbox));
std::cout << msg << std::endl;
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_header(&a, &entry));
if (rc < ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(msg, CheckStatusAndGetString(
api.archive_error_string(&a), sandbox));
std::cout << msg << std::endl;
}
if (rc == ARCHIVE_FATAL) {
return absl::FailedPreconditionError(
"Unexpected result from write_header call");
}
// In the following section, the calls (read, archive_write_data) are done
// in the sandboxed process since we do not need to transfer the data to
// the client process.
if (rc > ARCHIVE_FAILED) {
SAPI_ASSIGN_OR_RETURN(
msg, CheckStatusAndGetString(api.archive_entry_sourcepath(&entry),
sandbox));
int fd = open(msg.c_str(), O_RDONLY);
if (fd < 0) {
return absl::FailedPreconditionError("Could not open file");
}
sapi::v::Fd sapi_fd(fd);
sapi::v::Int read_ret;
sapi::v::Array<char> buff(kBuffSize);
sapi::v::UInt ssize(kBuffSize);
// We allocate the buffer remotely and then we can simply use the
// remote pointer (with PtrNone).
// This allows us to keep the data in the remote process without always
// transferring the memory.
SAPI_RETURN_IF_ERROR(sandbox.Allocate(&buff, true));
// We can use sapi methods that help us with file descriptors.
SAPI_RETURN_IF_ERROR(sandbox.TransferToSandboxee(&sapi_fd));
SAPI_RETURN_IF_ERROR(
sandbox.Call("read", &read_ret, &sapi_fd, buff.PtrNone(), &ssize));
while (read_ret.GetValue() > 0) {
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_data(&a, buff.PtrNone(),
read_ret.GetValue()));
SAPI_RETURN_IF_ERROR(sandbox.Call("read", &read_ret, &sapi_fd,
buff.PtrNone(), &ssize));
}
// sapi_fd variable goes out of scope here so both the local and the
// remote file descriptors are closed.
}
SAPI_RETURN_IF_ERROR(api.archive_entry_free(&entry));
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_close(&disk));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_close call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_free(&disk));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_free call");
}
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_close(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_close call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_free(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_free call");
}
return absl::OkStatus();
}
absl::Status ExtractArchive(const char* filename, int do_extract, int flags,
bool verbose) {
std::string tmp_dir;
if (do_extract) {
SAPI_ASSIGN_OR_RETURN(tmp_dir, CreateTempDirAtCWD());
}
// This RAII-style struct deletes the temporary directory created earlier
// once the function returns.
struct ExtractTempDirectoryCleanup {
ExtractTempDirectoryCleanup(const std::string& dir) : dir_(dir) {}
~ExtractTempDirectoryCleanup() {
sandbox2::file_util::fileops::DeleteRecursively(dir_);
}
private:
std::string dir_;
};
// We should only delete it if the do_extract flag is true which
// means that this struct is instantiated only in that case.
auto cleanup_ptr =
do_extract ? absl::make_unique<ExtractTempDirectoryCleanup>(tmp_dir)
: nullptr;
std::string filename_absolute = MakeAbsolutePathAtCWD(filename);
// Initialize sandbox and api objects.
SapiLibarchiveSandboxExtract sandbox(filename_absolute, do_extract, tmp_dir);
SAPI_RETURN_IF_ERROR(sandbox.Init());
LibarchiveApi api(&sandbox);
SAPI_ASSIGN_OR_RETURN(archive * ret_archive, api.archive_read_new());
if (ret_archive == nullptr) {
return absl::FailedPreconditionError("Failed to create read archive");
}
sapi::v::RemotePtr a(ret_archive);
SAPI_ASSIGN_OR_RETURN(ret_archive, api.archive_write_disk_new());
if (ret_archive == nullptr) {
return absl::FailedPreconditionError("Failed to create write disk archive");
}
sapi::v::RemotePtr ext(ret_archive);
int rc;
std::string msg;
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_disk_set_options(&ext, flags));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_disk_set_options call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_support_filter_bzip2(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_support_filter_bzip2 call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_support_filter_gzip(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_suppport_filter_gzip call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_support_filter_compress(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_support_filter_compress call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_support_format_tar(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result fromread_support_format_tar call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_support_format_cpio(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_support_format_tar call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_disk_set_standard_lookup(&ext));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_disk_set_standard_lookup call");
}
const char* filename_ptr = filename_absolute.c_str();
if (filename_ptr != nullptr && strcmp(filename_ptr, "-") == 0) {
filename_ptr = nullptr;
}
// The entries are saved with a relative path so they are all created
// relative to the current working directory.
SAPI_ASSIGN_OR_RETURN(
rc, api.archive_read_open_filename(
&a, sapi::v::ConstCStr(filename_ptr).PtrBefore(), kBlockSize));
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(
msg, CheckStatusAndGetString(api.archive_error_string(&a), sandbox));
return absl::FailedPreconditionError(msg);
}
while (true) {
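// entry_ptr_tmp receives the archive_entry* produced by
// archive_read_next_header; only the pointer value is transferred back and
// it is wrapped in a RemotePtr below.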
sapi::v::IntBase<archive_entry*> entry_ptr_tmp(0);
SAPI_ASSIGN_OR_RETURN(
rc, api.archive_read_next_header(&a, entry_ptr_tmp.PtrAfter()));
if (rc == ARCHIVE_EOF) {
break;
}
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(
msg, CheckStatusAndGetString(api.archive_error_string(&a), sandbox));
return absl::FailedPreconditionError(msg);
}
sapi::v::RemotePtr entry(entry_ptr_tmp.GetValue());
if (verbose && do_extract) {
std::cout << "x ";
}
if (verbose || !do_extract) {
SAPI_ASSIGN_OR_RETURN(msg, CheckStatusAndGetString(
api.archive_entry_pathname(&entry), sandbox));
std::cout << msg << std::endl;
}
if (do_extract) {
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_header(&ext, &entry));
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(msg, CheckStatusAndGetString(
api.archive_error_string(&a), sandbox));
std::cout << msg << std::endl;
} else {
SAPI_ASSIGN_OR_RETURN(rc, CopyData(&a, &ext, api, sandbox));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Failed to copy data between archive structs.");
}
}
}
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_close(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected value from read_close call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_read_free(&a));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from read_free call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_close(&ext));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_close call");
}
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_free(&ext));
if (rc != ARCHIVE_OK) {
return absl::FailedPreconditionError(
"Unexpected result from write_free call");
}
return absl::OkStatus();
}
absl::StatusOr<int> CopyData(sapi::v::RemotePtr* ar, sapi::v::RemotePtr* aw,
LibarchiveApi& api,
SapiLibarchiveSandboxExtract& sandbox) {
int rc;
std::string msg;
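// buff_ptr_tmp receives the address of the data block returned by
// archive_read_data_block. The data itself stays in the sandboxee and is
// later passed to archive_write_data_block as a remote pointer.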
sapi::v::IntBase<archive_entry*> buff_ptr_tmp(0);
sapi::v::ULLong size;
sapi::v::SLLong offset;
while (true) {
SAPI_ASSIGN_OR_RETURN(
rc, api.archive_read_data_block(ar, buff_ptr_tmp.PtrAfter(),
size.PtrAfter(), offset.PtrAfter()));
if (rc == ARCHIVE_EOF) {
return ARCHIVE_OK;
}
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(
msg, CheckStatusAndGetString(api.archive_error_string(ar), sandbox));
std::cout << msg << std::endl;
return rc;
}
sapi::v::RemotePtr buff(buff_ptr_tmp.GetValue());
SAPI_ASSIGN_OR_RETURN(rc, api.archive_write_data_block(
aw, &buff, size.GetValue(), offset.GetValue()));
if (rc != ARCHIVE_OK) {
SAPI_ASSIGN_OR_RETURN(
msg, CheckStatusAndGetString(api.archive_error_string(ar), sandbox));
std::cout << msg << std::endl;
return rc;
}
}
}
std::string MakeAbsolutePathAtCWD(const std::string& path) {
std::string result = sandbox2::file_util::fileops::MakeAbsolute(
path, sandbox2::file_util::fileops::GetCWD());
CHECK(result != "") << "Could not create absolute path for: " << path;
return sandbox2::file::CleanPath(result);
}
absl::StatusOr<std::string> CheckStatusAndGetString(
const absl::StatusOr<char*>& status, LibarchiveSandbox& sandbox) {
SAPI_ASSIGN_OR_RETURN(char* str, status);
if (str == nullptr) {
return absl::FailedPreconditionError("Could not get string from archive");
}
return sandbox.GetCString(sapi::v::RemotePtr(str));
}
absl::StatusOr<std::string> CreateTempDirAtCWD() {
std::string cwd = sandbox2::file_util::fileops::GetCWD();
CHECK(!cwd.empty()) << "Could not get current working directory";
cwd.append("/");
SAPI_ASSIGN_OR_RETURN(std::string result, sandbox2::CreateTempDir(cwd));
return result;
}


@@ -0,0 +1,63 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SAPI_LIBARCHIVE_EXAMPLES_MINITAR_H
#define SAPI_LIBARCHIVE_EXAMPLES_MINITAR_H
#include <archive.h>
#include <archive_entry.h>
#include <fcntl.h>
#include "libarchive_sapi.sapi.h" // NOLINT(build/include)
#include "sandbox.h" // NOLINT(build/include)
#include "sandboxed_api/sandbox2/util.h"
#include "sandboxed_api/sandbox2/util/path.h"
#include "sandboxed_api/sandbox2/util/temp_file.h"
// Creates an archive file at the given filename.
absl::Status CreateArchive(const char* filename, int compress,
const char** argv, bool verbose = true);
// Extracts an archive file. If do_extract is true, the files will
// be created relative to the current working directory. If do_extract
// is false then the function will just print the entries of the archive.
absl::Status ExtractArchive(const char* filename, int do_extract, int flags,
bool verbose = true);
// This function is only called from ExtractArchive. It is kept as a separate
// function in order to stay close to the structure of the original example.
absl::StatusOr<int> CopyData(sapi::v::RemotePtr* ar, sapi::v::RemotePtr* aw,
LibarchiveApi& api,
SapiLibarchiveSandboxExtract& sandbox);
inline constexpr size_t kBlockSize = 10240;
inline constexpr size_t kBuffSize = 16384;
// Converts a path to an absolute path by prepending the current
// working directory to the relative path.
// The path is also cleaned at the end.
std::string MakeAbsolutePathAtCWD(const std::string& path);
// Checks the given status and, on success, transfers the string from the
// sandboxee. This is used mostly with archive_error_string and other
// library functions that return a char*.
absl::StatusOr<std::string> CheckStatusAndGetString(
const absl::StatusOr<char*>& status, LibarchiveSandbox& sandbox);
// Creates a temporary directory in the current working directory and
// returns the path. This is used in the extract function where the sandboxed
// process changes the current working directory to this temporary directory.
absl::StatusOr<std::string> CreateTempDirAtCWD();
#endif // SAPI_LIBARCHIVE_EXAMPLES_MINITAR_H


@@ -0,0 +1,130 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file contains the main function from the original minitar example:
// https://github.com/libarchive/libarchive/blob/master/examples/minitar/minitar.c
// Most of the logic is the same; it was only simplified a bit since it is
// only used for the command-line tool.
// No sandboxing takes place in this function.
#include <cstring>
#include <iostream>
#include "sapi_minitar.h" // NOLINT(build/include)
static void PrintUsage() {
/* Many program options depend on compile options. */
const char* m =
"Usage: minitar [-"
"c"
"j"
"tvx"
"y"
"Z"
"z"
"] [-f file] [file]\n";
std::cout << m << std::endl;
exit(EXIT_FAILURE);
}
int main(int unused_argc, const char** argv) {
google::InitGoogleLogging(argv[0]);
const char* filename = nullptr;
int compress;
int flags;
int mode;
int opt;
mode = 'x';
int verbose = 0;
compress = '\0';
flags = ARCHIVE_EXTRACT_TIME;
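// Parse bundled single-letter options (e.g. -xvf): every argument that starts
// with '-' is scanned character by character, and 'f' consumes the rest of
// the word or the following argument as the archive filename.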
while (*++argv != nullptr && **argv == '-') {
const char* p = *argv + 1;
while ((opt = *p++) != '\0') {
switch (opt) {
case 'c':
mode = opt;
break;
case 'f':
if (*p != '\0')
filename = p;
else
filename = *++argv;
p += strlen(p);
break;
case 'j':
compress = opt;
break;
case 'p':
flags |= ARCHIVE_EXTRACT_PERM;
flags |= ARCHIVE_EXTRACT_ACL;
flags |= ARCHIVE_EXTRACT_FFLAGS;
break;
case 't':
mode = opt;
break;
case 'v':
verbose++;
break;
case 'x':
mode = opt;
break;
case 'y':
compress = opt;
break;
case 'Z':
compress = opt;
break;
case 'z':
compress = opt;
break;
default:
PrintUsage();
}
}
}
absl::Status status;
switch (mode) {
case 'c':
status = CreateArchive(filename, compress, argv, verbose);
if (!status.ok()) {
LOG(ERROR) << "Archive creation failed with message: "
<< status.message();
return EXIT_FAILURE;
}
break;
case 't':
status = ExtractArchive(filename, 0, flags, verbose);
if (!status.ok()) {
LOG(ERROR) << "Archive extraction failed with message: "
<< status.message();
return EXIT_FAILURE;
}
break;
case 'x':
status = ExtractArchive(filename, 1, flags, verbose);
if (!status.ok()) {
LOG(ERROR) << "Archive extraction failed with message: "
<< status.message();
return EXIT_FAILURE;
}
break;
}
return EXIT_SUCCESS;
}


@@ -0,0 +1,37 @@
archive_entry_free
archive_entry_new
archive_entry_pathname
archive_entry_sourcepath
archive_error_string
archive_read_close
archive_read_data_block
archive_read_disk_descend
archive_read_disk_new
archive_read_disk_open
archive_read_disk_set_standard_lookup
archive_read_free
archive_read_new
archive_read_next_header
archive_read_next_header2
archive_read_open_filename
archive_read_support_filter_bzip2
archive_read_support_filter_compress
archive_read_support_filter_gzip
archive_read_support_format_cpio
archive_read_support_format_tar
archive_write_add_filter_bzip2
archive_write_add_filter_compress
archive_write_add_filter_gzip
archive_write_add_filter_none
archive_write_close
archive_write_data
archive_write_data_block
archive_write_disk_new
archive_write_disk_set_options
archive_write_disk_set_standard_lookup
archive_write_free
archive_write_header
archive_write_new
archive_write_open_filename
archive_write_set_format_ustar
archive_entry_set_pathname


@@ -0,0 +1,18 @@
--- archive_virtual.c 2020-09-11 16:39:07.158014139 +0000
+++ archive_virtual2.c 2020-09-11 16:39:50.842107856 +0000
@@ -124,13 +124,13 @@
return ((a->vtable->archive_write_finish_entry)(a));
}
-la_ssize_t
+int
archive_write_data(struct archive *a, const void *buff, size_t s)
{
return ((a->vtable->archive_write_data)(a, buff, s));
}
-la_ssize_t
+int
archive_write_data_block(struct archive *a, const void *buff, size_t s,
la_int64_t o)
{


@@ -0,0 +1,16 @@
--- archive.h 2020-09-11 14:23:21.758842500 +0000
+++ archive2.h 2020-09-11 14:20:27.310494460 +0000
@@ -840,11 +840,11 @@
*/
__LA_DECL int archive_write_header(struct archive *,
struct archive_entry *);
-__LA_DECL la_ssize_t archive_write_data(struct archive *,
+__LA_DECL int archive_write_data(struct archive *,
const void *, size_t);
/* This interface is currently only available for archive_write_disk handles. */
-__LA_DECL la_ssize_t archive_write_data_block(struct archive *,
+__LA_DECL int archive_write_data_block(struct archive *,
const void *, size_t, la_int64_t);
__LA_DECL int archive_write_finish_entry(struct archive *);


@@ -0,0 +1,29 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(GoogleTest)
enable_testing()
add_executable(sapi_minitar_test
minitar_test.cc
)
target_link_libraries(sapi_minitar_test PRIVATE
sapi_minitar_lib
gtest
sapi::test_main
)
gtest_discover_tests(sapi_minitar_test)


@@ -0,0 +1,309 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/stat.h>
#include <unistd.h>
#include <fstream>
#include "sapi_minitar.h" // NOLINT(build/include)
#include "gtest/gtest.h"
#include "sandboxed_api/sandbox2/util/path.h"
#include "sandboxed_api/util/status_matchers.h"
using ::sandbox2::file::JoinPath;
using ::sapi::IsOk;
using ::testing::Eq;
using ::testing::IsTrue;
using ::testing::StrEq;
using ::sandbox2::file_util::fileops::Exists;
using ::sandbox2::util::VecStringToCharPtrArr;
namespace {
// We will use a fixture class for testing which allows us to override the
// SetUp and TearDown functions. Also, data that needs to be initialized
// or destroyed only once (the test files and directories) will be handled
// in the SetUpTestSuite and TearDownTestSuite functions which are executed
// only once.
// All of the testing data will be placed in a temporary directory and each
// test will have its own temporary directory. At the end of each test
// and of the whole suite, the temporary data is deleted.
class MiniTarTest : public ::testing::Test {
protected:
// Before running the tests, we create a temporary directory which will
// store generated files and directories used for testing.
// The directory will look as follows:
// -file1
// -dir1 - file2
// - dir2 - file3
static void SetUpTestSuite() {
absl::StatusOr<std::string> tmp_status = CreateTempDirAtCWD();
ASSERT_THAT(tmp_status, IsOk());
data_dir_ = new std::string(std::move(tmp_status).value());
init_wd_ = new std::string(sandbox2::file_util::fileops::GetCWD());
ASSERT_THAT(Exists(*data_dir_, false), IsTrue())
<< "Test data directory was not created";
ASSERT_THAT(chdir(data_dir_->data()), Eq(0))
<< "Could not chdir into test data directory";
CreateAndWriteToFile(kFile1);
ASSERT_THAT(mkdir(kDir1.data(), 0755), Eq(0)) << "Could not create dir1";
CreateAndWriteToFile(kFile2);
ASSERT_THAT(mkdir(kDir2.data(), 0755), Eq(0)) << "Could not create dir2";
CreateAndWriteToFile(kFile3);
test_count_ = 0;
}
static void TearDownTestSuite() {
// The tests have the data directory as their working directory at the end
// so we move to the initial working directory in order to not delete the
// directory that we are inside of.
ASSERT_THAT(chdir(init_wd_->data()), Eq(0))
<< "Could not chdir into initial working directory";
EXPECT_THAT(sandbox2::file_util::fileops::DeleteRecursively(*data_dir_),
IsTrue())
<< "Error during test data deletion";
delete init_wd_;
delete data_dir_;
}
void SetUp() override {
// We use a unique id based on test count to make sure that files created
// during tests do not overlap.
id_ = "test" + std::to_string(test_count_);
absl::StatusOr<std::string> tmp_status = CreateTempDirAtCWD();
ASSERT_THAT(tmp_status, IsOk());
tmp_dir_ = tmp_status.value();
ASSERT_THAT(Exists(tmp_dir_, false), IsTrue())
<< "Could not create test specific temporary directory";
ASSERT_THAT(chdir(data_dir_->data()), Eq(0))
<< "Could not chdir into test data directory";
}
void TearDown() override {
// Move to another directory before deleting the temporary folder.
ASSERT_THAT(chdir(data_dir_->data()), Eq(0))
<< "Could not chdir into test data directory";
EXPECT_THAT(sandbox2::file_util::fileops::DeleteRecursively(tmp_dir_),
IsTrue())
<< "Error during test temporary directory deletion";
++test_count_;
}
// Creates the file specified and writes the same filename.
// This is done in order to not have completely empty files for the
// archiving step.
static void CreateAndWriteToFile(absl::string_view file) {
std::ofstream fin(file.data());
ASSERT_THAT(fin.is_open(), IsTrue()) << "Could not create " << file;
fin << file;
fin.close();
}
// Checks if the files exists and if the contents are correct.
// In these tests, each file contains the relative path from the test
// directory.
// Example: dir1/dir2/file3 will contain dir1/dir2/file3.
// What the files contain does not matter much; the only important thing
// is that they are not empty, so we can check that the contents are preserved.
static void CheckFile(const std::string& file) {
ASSERT_THAT(Exists(file, false), IsTrue()) << "Could not find " << file;
std::ifstream fin(file);
ASSERT_THAT(fin.is_open(), IsTrue()) << "Error when opening " << file;
std::string file_contents((std::istreambuf_iterator<char>(fin)),
std::istreambuf_iterator<char>());
EXPECT_THAT(file_contents, StrEq(file))
<< "Contents of " << file << " are different after extraction";
fin.close();
}
static int test_count_;
static std::string* data_dir_;
static std::string* init_wd_;
std::string tmp_dir_;
std::string id_;
static constexpr absl::string_view kFile1 = "file1";
static constexpr absl::string_view kFile2 = "dir1/file2";
static constexpr absl::string_view kFile3 = "dir1/dir2/file3";
static constexpr absl::string_view kDir1 = "dir1";
static constexpr absl::string_view kDir2 = "dir1/dir2";
};
int MiniTarTest::test_count_;
std::string* MiniTarTest::data_dir_;
std::string* MiniTarTest::init_wd_;
// The tests have the following pattern:
// 1) From inside the test data directory, call the create function with
// different arguments.
// 2) Move to the test specific temporary directory created during the
// set up phase.
// 3) Extract the archive created at step 1.
// 4) Check that the files in the archive have been extracted correctly
// by first checking if they exist and then checking if the content is the
// same as in the original file.
TEST_F(MiniTarTest, TestFileSimple) {
std::vector<std::string> v = {kFile1.data()};
ASSERT_THAT(CreateArchive(id_.data(), 0, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
}
TEST_F(MiniTarTest, TestMultipleFiles) {
std::vector<std::string> v = {kFile1.data(), kFile2.data(), kFile3.data()};
ASSERT_THAT(CreateArchive(id_.data(), 0, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(Exists(id_.data(), false), IsTrue())
<< "Archive file was not created";
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestDirectorySimple) {
std::vector<std::string> v = {kDir2.data()};
ASSERT_THAT(CreateArchive(id_.data(), 0, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestDirectoryNested) {
std::vector<std::string> v = {kDir1.data()};
ASSERT_THAT(CreateArchive(id_.data(), 0, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestComplex) {
std::vector<std::string> v = {kFile1.data(), kDir1.data()};
ASSERT_THAT(CreateArchive(id_.data(), 0, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestCompress) {
std::vector<std::string> v = {kFile1.data(), kDir1.data()};
int compress = 'Z';
ASSERT_THAT(
CreateArchive(id_.data(), compress, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestGZIP) {
std::vector<std::string> v = {kFile1.data(), kDir1.data()};
int compress = 'z';
ASSERT_THAT(
CreateArchive(id_.data(), compress, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestBZIP2) {
std::vector<std::string> v = {kFile1.data(), kDir1.data()};
int compress = 'j';
ASSERT_THAT(
CreateArchive(id_.data(), compress, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
TEST_F(MiniTarTest, TestPaths) {
// These should be equivalent to kFile1 and kDir1 after cleaning.
std::vector<std::string> v = {JoinPath("a/b/../../c/../", kFile1).data(),
JoinPath("d/../e/././///../", kDir1).data()};
ASSERT_THAT(CreateArchive(id_.data(), 0, VecStringToCharPtrArr(v), false),
IsOk());
ASSERT_THAT(chdir(tmp_dir_.data()), Eq(0))
<< "Could not chdir into test data directory";
ASSERT_THAT(ExtractArchive(JoinPath(*data_dir_, id_).data(), 1, 0, false),
IsOk());
CheckFile(std::string(kFile1));
CheckFile(std::string(kFile2));
CheckFile(std::string(kFile3));
}
} // namespace