Skip to content

Commit

Permalink
Merge branch 'main' into jarred/binbunlock
Browse files Browse the repository at this point in the history
  • Loading branch information
dylan-conway committed Dec 15, 2024
2 parents 145bef8 + 3ce6ffa commit 55cee64
Show file tree
Hide file tree
Showing 5 changed files with 84 additions and 47 deletions.
6 changes: 3 additions & 3 deletions src/bun.js/node/node_os.zig
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ pub const OS = struct {
if (std.fs.openFileAbsolute("/proc/stat", .{})) |file| {
defer file.close();

const read = try bun.sys.File.from(file).readToEndWithArrayList(&file_buf).unwrap();
const read = try bun.sys.File.from(file).readToEndWithArrayList(&file_buf, true).unwrap();
defer file_buf.clearRetainingCapacity();
const contents = file_buf.items[0..read];

Expand Down Expand Up @@ -124,7 +124,7 @@ pub const OS = struct {
if (std.fs.openFileAbsolute("/proc/cpuinfo", .{})) |file| {
defer file.close();

const read = try bun.sys.File.from(file).readToEndWithArrayList(&file_buf).unwrap();
const read = try bun.sys.File.from(file).readToEndWithArrayList(&file_buf, true).unwrap();
defer file_buf.clearRetainingCapacity();
const contents = file_buf.items[0..read];

Expand Down Expand Up @@ -175,7 +175,7 @@ pub const OS = struct {
if (std.fs.openFileAbsolute(path, .{})) |file| {
defer file.close();

const read = try bun.sys.File.from(file).readToEndWithArrayList(&file_buf).unwrap();
const read = try bun.sys.File.from(file).readToEndWithArrayList(&file_buf, true).unwrap();
defer file_buf.clearRetainingCapacity();
const contents = file_buf.items[0..read];

Expand Down
84 changes: 52 additions & 32 deletions src/install/install.zig
Original file line number Diff line number Diff line change
Expand Up @@ -1139,19 +1139,14 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type {
var git_tag_stack_fallback = std.heap.stackFallback(2048, bun.default_allocator);
const allocator = git_tag_stack_fallback.get();

var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false;
defer {
if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close();
}

const bun_tag_file = File.readFrom(
destination_dir,
var bun_tag_file = this.node_modules.readSmallFile(
root_node_modules_dir,
bun_tag_path,
allocator,
).unwrap() catch return false;
defer allocator.free(bun_tag_file);
) catch return .{};
defer bun_tag_file.bytes.deinit();

return strings.eqlLong(repo.resolved.slice(this.lockfile.buffers.string_bytes.items), bun_tag_file, true);
return strings.eqlLong(repo.resolved.slice(this.lockfile.buffers.string_bytes.items), bun_tag_file.bytes.items, true);
}

pub fn verify(
Expand Down Expand Up @@ -1179,10 +1174,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type {
// Only check for destination directory in node_modules. We can't use package.json because
// it might not exist
fn verifyTransitiveSymlinkedFolder(this: *@This(), root_node_modules_dir: std.fs.Dir) bool {
var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false;
defer destination_dir.close();

return bun.sys.directoryExistsAt(destination_dir.fd, this.destination_dir_subpath).unwrap() catch false;
return this.node_modules.directoryExistsAt(root_node_modules_dir, this.destination_dir_subpath);
}

fn getInstalledPackageJsonSource(
Expand All @@ -1200,7 +1192,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type {
const package_json_path: [:0]u8 = this.destination_dir_subpath_buf[0 .. this.destination_dir_subpath.len + std.fs.path.sep_str.len + "package.json".len :0];
defer this.destination_dir_subpath_buf[this.destination_dir_subpath.len] = 0;

var package_json_file = this.node_modules.openPackageJSON(root_node_modules_dir, package_json_path) catch return null;
var package_json_file = this.node_modules.openFile(root_node_modules_dir, package_json_path) catch return null;
defer package_json_file.close();

// Heuristic: most package.jsons will be less than 2048 bytes.
Expand Down Expand Up @@ -12081,16 +12073,49 @@ pub const PackageManager = struct {
this.path.clearAndFree();
}

noinline fn openPackageJSONFileWithoutOpeningDirectories(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, package_json_path: [:0]const u8) bun.sys.Maybe(bun.sys.File) {
// Since the stack size of these functions are rather large, let's not let them be inlined.
noinline fn directoryExistsAtWithoutOpeningDirectories(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, file_path: [:0]const u8) bool {
    // Join "<node_modules relative path>/<file_path>" into a stack buffer so we
    // can stat relative to the root dir without opening intermediate directories.
    var stack_buf: bun.PathBuffer = undefined;
    const path_parts: [2][]const u8 = .{ this.path.items, file_path };
    const joined_path = bun.path.joinZBuf(&stack_buf, &path_parts, .auto);
    // Any error (including ENAMETOOLONG) is reported as "does not exist".
    return bun.sys.directoryExistsAt(bun.toFD(root_node_modules_dir), joined_path).unwrapOr(false);
}

/// Returns whether `file_path` exists as a directory inside this node_modules
/// folder, relative to `root_node_modules_dir`. Errors (missing path, open
/// failure) are reported as `false`.
pub fn directoryExistsAt(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, file_path: [:0]const u8) bool {
    // Conservative ENAMETOOLONG guard: if the joined path is guaranteed to fit,
    // skip opening the folder entirely. Doubling `file_path.len` (not
    // `this.path.items.len`) keeps this heuristic consistent with `openFile`.
    if (this.path.items.len + file_path.len * 2 < bun.MAX_PATH_BYTES) {
        return this.directoryExistsAtWithoutOpeningDirectories(root_node_modules_dir, file_path);
    }

    // Fallback for very long paths: open the folder first, then do a
    // relative existence check against its fd.
    const dir = this.openDir(root_node_modules_dir) catch return false;
    defer {
        _ = bun.sys.close(bun.toFD(dir));
    }

    return bun.sys.directoryExistsAt(bun.toFD(dir), file_path).unwrapOr(false);
}

// Since the stack size of these functions are rather large, let's not let them be inlined.
noinline fn openFileWithoutOpeningDirectories(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, file_path: [:0]const u8) bun.sys.Maybe(bun.sys.File) {
    // Build the full relative path on the stack so no intermediate directory
    // handles need to be opened.
    var stack_buf: bun.PathBuffer = undefined;
    const path_parts: [2][]const u8 = .{ this.path.items, file_path };
    const full_path = bun.path.joinZBuf(&stack_buf, &path_parts, .auto);
    // Read-only open, relative to the root node_modules directory fd.
    return bun.sys.File.openat(bun.toFD(root_node_modules_dir), full_path, bun.O.RDONLY, 0);
}

pub fn openPackageJSON(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, package_json_path: [:0]const u8) !bun.sys.File {
if (this.path.items.len + package_json_path.len * 2 < bun.MAX_PATH_BYTES) {
/// Opens `file_path` inside this node_modules folder and reads it to the end.
/// Suitable for potentially large files (sizes the buffer via fstat — see
/// `File.readToEnd`). Caller owns `bytes` in the returned result.
pub fn readFile(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, file_path: [:0]const u8, allocator: std.mem.Allocator) !bun.sys.File.ReadToEndResult {
    const opened = try this.openFile(root_node_modules_dir, file_path);
    defer opened.close();
    return opened.readToEnd(allocator);
}

/// Opens `file_path` inside this node_modules folder and reads it to the end,
/// skipping the fstat() size probe — intended for files expected to be small
/// (see `File.readToEndSmall`). Caller owns `bytes` in the returned result.
pub fn readSmallFile(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, file_path: [:0]const u8, allocator: std.mem.Allocator) !bun.sys.File.ReadToEndResult {
    const opened = try this.openFile(root_node_modules_dir, file_path);
    defer opened.close();
    return opened.readToEndSmall(allocator);
}

pub fn openFile(this: *const NodeModulesFolder, root_node_modules_dir: std.fs.Dir, file_path: [:0]const u8) !bun.sys.File {
if (this.path.items.len + file_path.len * 2 < bun.MAX_PATH_BYTES) {
// If we do not run the risk of ENAMETOOLONG, then let's just avoid opening the extra directories altogether.
switch (this.openPackageJSONFileWithoutOpeningDirectories(root_node_modules_dir, package_json_path)) {
switch (this.openFileWithoutOpeningDirectories(root_node_modules_dir, file_path)) {
.err => |e| {
switch (e.getErrno()) {
// Just incase we're wrong, let's try the fallback
Expand All @@ -12109,7 +12134,7 @@ pub const PackageManager = struct {
_ = bun.sys.close(bun.toFD(dir));
}

return try bun.sys.File.openat(bun.toFD(dir), package_json_path, bun.O.RDONLY, 0).unwrap();
return try bun.sys.File.openat(bun.toFD(dir), file_path, bun.O.RDONLY, 0).unwrap();
}

pub fn openDir(this: *const NodeModulesFolder, root: std.fs.Dir) !std.fs.Dir {
Expand Down Expand Up @@ -12244,18 +12269,13 @@ pub const PackageManager = struct {

this.completed_trees.set(tree_id);

if (maybe_destination_dir) |maybe| {
if (maybe.getDir() catch null) |_destination_dir| {
var destination_dir = _destination_dir;
defer {
if (maybe_destination_dir == null) {
destination_dir.close();
}
}

this.seen_bin_links.clearRetainingCapacity();
// Avoid opening this directory if we don't need to.
if (tree.binaries.count() > 0) {
// Don't close this directory in here. It will be closed by the caller.
if (maybe_destination_dir) |maybe| {
if (maybe.getDir() catch null) |destination_dir| {
this.seen_bin_links.clearRetainingCapacity();

if (tree.binaries.count() > 0) {
var link_target_buf: bun.PathBuffer = undefined;
var link_dest_buf: bun.PathBuffer = undefined;
var link_rel_buf: bun.PathBuffer = undefined;
Expand Down
2 changes: 1 addition & 1 deletion src/install/resolvers/folder_resolver.zig
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ pub const FolderResolution = union(Tag) {
body.data.reset();
var man = body.data.list.toManaged(manager.allocator);
defer body.data.list = man.moveToUnmanaged();
_ = try file.readToEndWithArrayList(&man).unwrap();
_ = try file.readToEndWithArrayList(&man, true).unwrap();
}

break :brk logger.Source.initPathString(abs, body.data.list.items);
Expand Down
2 changes: 1 addition & 1 deletion src/io/PipeReader.zig
Original file line number Diff line number Diff line change
Expand Up @@ -806,7 +806,7 @@ const PosixBufferedReader = struct {
pub fn finalBuffer(this: *PosixBufferedReader) *std.ArrayList(u8) {
if (this.flags.memfd and this.handle == .fd) {
defer this.handle.close(null, {});
_ = bun.sys.File.readToEndWithArrayList(.{ .handle = this.handle.fd }, this.buffer()).unwrap() catch |err| {
_ = bun.sys.File.readToEndWithArrayList(.{ .handle = this.handle.fd }, this.buffer(), false).unwrap() catch |err| {
bun.Output.debugWarn("error reading from memfd\n{}", .{err});
return this.buffer();
};
Expand Down
37 changes: 27 additions & 10 deletions src/sys.zig
Original file line number Diff line number Diff line change
Expand Up @@ -3411,15 +3411,19 @@ pub const File = struct {
return .{ .result = buf[0..read_amount] };
}

pub fn readToEndWithArrayList(this: File, list: *std.ArrayList(u8)) Maybe(usize) {
const size = switch (this.getEndPos()) {
.err => |err| {
return .{ .err = err };
},
.result => |s| s,
};

list.ensureTotalCapacityPrecise(size + 16) catch bun.outOfMemory();
pub fn readToEndWithArrayList(this: File, list: *std.ArrayList(u8), probably_small: bool) Maybe(usize) {
if (probably_small) {
list.ensureUnusedCapacity(64) catch bun.outOfMemory();
} else {
list.ensureTotalCapacityPrecise(
switch (this.getEndPos()) {
.err => |err| {
return .{ .err = err };
},
.result => |s| s,
} + 16,
) catch bun.outOfMemory();
}

var total: i64 = 0;
while (true) {
Expand Down Expand Up @@ -3447,9 +3451,22 @@ pub const File = struct {

return .{ .result = @intCast(total) };
}

/// Use this function on potentially large files.
/// Calls fstat() on the file to get the size of the file and avoids reallocations + extra read() calls.
/// The returned result carries `bytes` in both the success and error cases;
/// caller must deinit it either way.
pub fn readToEnd(this: File, allocator: std.mem.Allocator) ReadToEndResult {
    var bytes = std.ArrayList(u8).init(allocator);
    switch (readToEndWithArrayList(this, &bytes, false)) {
        .err => |err| return .{ .err = err, .bytes = bytes },
        .result => return .{ .err = null, .bytes = bytes },
    }
}

/// Use this function on small files < 1024 bytes.
/// This will skip the fstat() call.
pub fn readToEndSmall(this: File, allocator: std.mem.Allocator) ReadToEndResult {
var list = std.ArrayList(u8).init(allocator);
return switch (readToEndWithArrayList(this, &list, true)) {
.err => |err| .{ .err = err, .bytes = list },
.result => .{ .err = null, .bytes = list },
};
Expand Down

0 comments on commit 55cee64

Please sign in to comment.