enough code to get some examples working

commit 25cf0b59d8 (parent 7fc3fb955a)
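In summary, the diff below (hunks from src/Package/Fetch.zig and src/Package/Manifest.zig) threads the total unpacked size of a package through computeHash: each HashedFile worker now records a per-file size, and the digest and byte total come back together as a tuple. Manifest gains a prototype NewHashDecoded/NewHashEncoded pair that packs name, semver, size, and a truncated digest into a printable package hash. The `if (true) std.process.exit(0);` added to runResource is temporary scaffolding; wrapping the noreturn call in `if (true)` keeps the compiler from rejecting the statements after it as unreachable code.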
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -447,7 +447,7 @@ fn runResource(
     const rand_int = std.crypto.random.int(u64);
     const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
 
-    const package_sub_path = blk: {
+    const package_sub_path, const size = blk: {
         const tmp_directory_path = try cache_root.join(arena, &.{tmp_dir_sub_path});
         var tmp_directory: Cache.Directory = .{
             .path = tmp_directory_path,
@@ -499,12 +499,14 @@ fn runResource(
         // Empty directories have already been omitted by `unpackResource`.
         // Compute the package hash based on the remaining files in the temporary
         // directory.
-        f.actual_hash = try computeHash(f, pkg_path, filter);
+        f.actual_hash, const size = try computeHash(f, pkg_path, filter);
 
-        break :blk if (unpack_result.root_dir.len > 0)
+        const pkg_sub_path = if (unpack_result.root_dir.len > 0)
             try fs.path.join(arena, &.{ tmp_dir_sub_path, unpack_result.root_dir })
         else
            tmp_dir_sub_path;
+
+        break :blk .{ pkg_sub_path, size };
     };
 
     // Rename the temporary directory into the global zig package cache
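The hunk above switches `package_sub_path` to the tuple form of a labeled block: the block now breaks with `.{ pkg_sub_path, size }` and the call site destructures it into two constants. A minimal standalone sketch of the pattern (the values are illustrative, not from the commit):

const std = @import("std");

pub fn main() void {
    // A labeled block may break with an anonymous tuple; the
    // destructuring assignment binds each element to its own constant.
    const sub_path, const size = blk: {
        const p: []const u8 = "tmp/abc123";
        const n: u64 = 4096;
        break :blk .{ p, n };
    };
    std.debug.print("{s}: {d} bytes\n", .{ sub_path, size });
}

This requires a Zig version with destructuring assignment (0.12 or later).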
@@ -513,6 +515,14 @@ fn runResource(
     // by the system. This is done even if the hash is invalid, in case the
     // package with the different hash is used in the future.
 
+    const name = f.manifest.?.name;
+    const semver = f.manifest.?.version;
+    const new_hash = Package.Manifest.NewHashDecoded.init(name, semver, @intCast(size), f.actual_hash);
+    const encoded = new_hash.encode();
+    std.debug.print("new hash: {s}\n", .{encoded.toSlice()});
+
+    if (true) std.process.exit(0);
+
     f.package_root = .{
         .root_dir = cache_root,
         .sub_path = try arena.dupe(u8, "p" ++ s ++ Manifest.hexDigest(f.actual_hash)),
@@ -1420,7 +1430,7 @@ fn computeHash(
     f: *Fetch,
     pkg_path: Cache.Path,
     filter: Filter,
-) RunError!Manifest.Digest {
+) RunError!struct { Manifest.Digest, u64 } {
     // All the path name strings need to be in memory for sorting.
     const arena = f.arena.allocator();
     const gpa = f.arena.child_allocator;
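The new return type above is an anonymous tuple inside an error union. A hedged sketch of declaring and consuming such a function, with Sha256 standing in for Manifest.Hash and digestAndSize standing in for computeHash (both names are illustrative):

const std = @import("std");
const Sha256 = std.crypto.hash.sha2.Sha256;

// Stand-in for computeHash: return both a digest and a byte count.
fn digestAndSize(bytes: []const u8) error{}!struct { [Sha256.digest_length]u8, u64 } {
    var digest: [Sha256.digest_length]u8 = undefined;
    Sha256.hash(bytes, &digest, .{});
    return .{ digest, bytes.len };
}

pub fn main() !void {
    // try unwraps the error union; destructuring splits the tuple,
    // mirroring `f.actual_hash, const size = try computeHash(...)`.
    const digest, const size = try digestAndSize("hello");
    std.debug.print("first digest byte: {x:0>2}, size: {d}\n", .{ digest[0], size });
}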
@@ -1499,6 +1509,7 @@ fn computeHash(
             .normalized_path = try normalizePathAlloc(arena, entry_pkg_path),
             .kind = kind,
             .hash = undefined, // to be populated by the worker
+            .size = undefined, // to be populated by the worker
             .failure = undefined, // to be populated by the worker
         };
         thread_pool.spawnWg(&wait_group, workerHashFile, .{ root_dir, hashed_file });
@@ -1540,6 +1551,7 @@ fn computeHash(
 
     var hasher = Manifest.Hash.init(.{});
     var any_failures = false;
+    var total_size: u64 = 0;
     for (all_files.items) |hashed_file| {
         hashed_file.failure catch |err| {
             any_failures = true;
@@ -1550,6 +1562,7 @@ fn computeHash(
             });
         };
         hasher.update(&hashed_file.hash);
+        total_size += hashed_file.size;
     }
     for (deleted_files.items) |deleted_file| {
         deleted_file.failure catch |err| {
@@ -1574,7 +1587,7 @@ fn computeHash(
         };
     }
 
-    return hasher.finalResult();
+    return .{ hasher.finalResult(), total_size };
 }
 
 fn dumpHashInfo(all_files: []const *const HashedFile) !void {
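The three hunks above sum the total only after the wait group completes: each worker fills in its hashed_file.size, and the single-threaded summation loop runs once all workers have joined, so no atomics are needed on total_size. A standalone sketch of that structure, assuming a Zig 0.13-era std.Thread.Pool with spawnWg/waitAndWork (the same APIs the surrounding code uses); Item and worker are illustrative names:

const std = @import("std");

const Item = struct {
    data: []const u8,
    size: u64 = undefined, // to be populated by the worker
};

fn worker(item: *Item) void {
    item.size = item.data.len; // stand-in for hashing while counting bytes
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa });
    defer pool.deinit();

    var wait_group: std.Thread.WaitGroup = .{};
    var items = [_]Item{ .{ .data = "abc" }, .{ .data = "defg" } };
    for (&items) |*item| pool.spawnWg(&wait_group, worker, .{item});
    pool.waitAndWork(&wait_group);

    // Reduce on the main thread, after all workers have finished.
    var total_size: u64 = 0;
    for (items) |item| total_size += item.size;
    std.debug.print("total size: {d}\n", .{total_size});
}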
@@ -1604,20 +1617,20 @@ fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void {
 fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
     var buf: [8000]u8 = undefined;
     var hasher = Manifest.Hash.init(.{});
+    var size: u64 = 0;
     hasher.update(hashed_file.normalized_path);
 
     switch (hashed_file.kind) {
         .file => {
             var file = try dir.openFile(hashed_file.fs_path, .{});
             defer file.close();
-            // Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463
-            hasher.update(&.{ 0, 0 });
             var file_header: FileHeader = .{};
             while (true) {
                 const bytes_read = try file.read(&buf);
                 if (bytes_read == 0) break;
                 hasher.update(buf[0..bytes_read]);
                 file_header.update(buf[0..bytes_read]);
+                size += bytes_read;
             }
             if (file_header.isExecutable()) {
                 try setExecutable(file);
@@ -1635,6 +1648,7 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
         },
     }
     hasher.final(&hashed_file.hash);
+    hashed_file.size = size;
 }
 
 fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
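Two changes land in hashFileFallible above: the hard-coded executable-bit bytes (the issue #17463 workaround) no longer feed the hasher, and the read loop now counts bytes as it hashes. The single-pass loop, extracted as a hedged standalone sketch with Sha256 standing in for Manifest.Hash and an illustrative file path:

const std = @import("std");
const Sha256 = std.crypto.hash.sha2.Sha256;

// Read a file in fixed-size chunks, feeding every chunk to the hasher
// while counting bytes, as in hashFileFallible.
fn hashAndCount(file: std.fs.File) !struct { [Sha256.digest_length]u8, u64 } {
    var buf: [8000]u8 = undefined;
    var hasher = Sha256.init(.{});
    var size: u64 = 0;
    while (true) {
        const bytes_read = try file.read(&buf);
        if (bytes_read == 0) break;
        hasher.update(buf[0..bytes_read]);
        size += bytes_read;
    }
    var digest: [Sha256.digest_length]u8 = undefined;
    hasher.final(&digest);
    return .{ digest, size };
}

pub fn main() !void {
    var file = try std.fs.cwd().openFile("build.zig", .{}); // illustrative path
    defer file.close();
    const digest, const size = try hashAndCount(file);
    std.debug.print("{d} bytes, first digest byte {x:0>2}\n", .{ size, digest[0] });
}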
@@ -1662,6 +1676,7 @@ const HashedFile = struct {
     fs_path: []const u8,
     normalized_path: []const u8,
     hash: Manifest.Digest,
+    size: u64,
     failure: Error!void,
     kind: Kind,
 
--- a/src/Package/Manifest.zig
+++ b/src/Package/Manifest.zig
@@ -6,6 +6,84 @@ pub const multihash_len = 1 + 1 + Hash.digest_length;
 pub const multihash_hex_digest_len = 2 * multihash_len;
 pub const MultiHashHexDigest = [multihash_hex_digest_len]u8;
 
+pub const NewHashDecoded = extern struct {
+    name: [16]u8,
+    semver: [16]u8,
+    size: u32 align(1),
+    hash: [5]u8,
+
+    pub fn init(name: []const u8, semver: std.SemanticVersion, size: u32, hash: Digest) NewHashDecoded {
+        var result: NewHashDecoded = undefined;
+
+        const name_len = @min(name.len, result.name.len);
+        @memcpy(result.name[0..name_len], name[0..name_len]);
+        @memset(result.name[name_len..], 0);
+
+        if (std.fmt.bufPrint(&result.semver, "{}", .{semver})) |slice| {
+            @memset(result.semver[slice.len..], 0);
+        } else |err| switch (err) {
+            error.NoSpaceLeft => {
+                result.semver[result.semver.len - 1] = '-';
+            },
+        }
+
+        result.size = size;
+
+        @memcpy(&result.hash, hash[0..result.hash.len]);
+
+        return result;
+    }
+
+    pub fn getName(h: *const NewHashDecoded) []const u8 {
+        const len = mem.indexOfScalar(u8, &h.name, 0) orelse h.name.len;
+        return h.name[0..len];
+    }
+
+    pub fn getSemVer(h: *const NewHashDecoded) []const u8 {
+        const len = mem.indexOfScalar(u8, &h.semver, 0) orelse h.semver.len;
+        return h.semver[0..len];
+    }
+
+    pub fn encode(h: NewHashDecoded) NewHashEncoded {
+        var result: NewHashEncoded = .{ .bytes = undefined };
+        var i: usize = 0;
+
+        const name = h.getName();
+        @memcpy(result.bytes[i..][0..name.len], name);
+        i += name.len;
+
+        result.bytes[i] = '-';
+        i += 1;
+
+        const semver = h.getSemVer();
+        @memcpy(result.bytes[i..][0..semver.len], semver);
+        i += semver.len;
+
+        result.bytes[i] = '-';
+        i += 1;
+
+        const hash_len = 5; // TODO `h.hash.len`
+        var decoded_bytes: [hash_len + @sizeOf(u32)]u8 = undefined;
+        std.mem.writeInt(u32, decoded_bytes[0..@sizeOf(u32)], h.size, .little);
+        @memcpy(decoded_bytes[@sizeOf(u32)..], &h.hash);
+
+        i += (std.fs.base64_encoder.encode(result.bytes[i..], &decoded_bytes)).len;
+
+        @memset(result.bytes[i..], 0);
+        return result;
+    }
+};
+
+pub const new_hash_encoded_max_len = std.fs.base64_encoder.calcSize(@sizeOf(NewHashDecoded));
+pub const NewHashEncoded = extern struct {
+    bytes: [new_hash_encoded_max_len]u8,
+
+    pub fn toSlice(h: *const NewHashEncoded) []const u8 {
+        const len = mem.indexOfScalar(u8, &h.bytes, 0) orelse h.bytes.len;
+        return h.bytes[0..len];
+    }
+};
+
 pub const Dependency = struct {
     location: Location,
     location_tok: Ast.TokenIndex,
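The encode method above lays the hash out as name ++ "-" ++ semver ++ "-" ++ base64(size as little-endian u32 ++ hash[0..5]); note that hash_len = 5 carries a TODO, so only the first five digest bytes survive in this prototype. A hedged sketch of just the trailing base64 segment, using the same std.fs.base64_encoder and illustrative values:

const std = @import("std");

pub fn main() void {
    const size: u32 = 16384;
    const hash_prefix = [5]u8{ 0xde, 0xad, 0xbe, 0xef, 0x01 };

    // size (little-endian u32) followed by the truncated digest.
    var decoded: [@sizeOf(u32) + hash_prefix.len]u8 = undefined;
    std.mem.writeInt(u32, decoded[0..@sizeOf(u32)], size, .little);
    @memcpy(decoded[@sizeOf(u32)..], &hash_prefix);

    var buf: [std.fs.base64_encoder.calcSize(decoded.len)]u8 = undefined;
    const tail = std.fs.base64_encoder.encode(&buf, &decoded);
    std.debug.print("example-1.2.3-{s}\n", .{tail});
}

toSlice then trims the zero padding off the fixed-size NewHashEncoded buffer, the same trick getName and getSemVer use for their fields.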
@@ -591,12 +669,14 @@ const Parse = struct {
 };
 
 const Manifest = @This();
+const builtin = @import("builtin");
 const std = @import("std");
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
 const Ast = std.zig.Ast;
 const testing = std.testing;
+const native_endian = builtin.cpu.arch.endian();
 
 test "basic" {
     const gpa = testing.allocator;