
start

Create a new child progress node. Thread-safe.

Passing 0 for `estimated_total_items` means the total is unknown.
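For illustration, a minimal sketch of creating and completing a child node (this assumes the `std.Progress` API of recent Zig versions; the names, the item count of 10, and the work loop are all illustrative):

const std = @import("std");

pub fn main() void {
    // Root progress node for the whole run; root_name is illustrative.
    const root = std.Progress.start(.{ .root_name = "build" });
    defer root.end();

    // Child node tracking 10 sub-steps. Passing 0 instead of 10 would
    // mark the total number of items as unknown.
    const compile = root.start("compile", 10);
    defer compile.end();

    var i: usize = 0;
    while (i < 10) : (i += 1) {
        // ... one unit of work ...
        compile.completeOne();
    }
}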

Parameters

name: []const u8
estimated_total_items: usize

Types

Node
Represents one unit of progress.

Functions

start
Initializes a global Progress instance.
lockStdErr
Allows the caller to freely write to stderr until `unlockStdErr` is called.
lockStderrWriter
Allows the caller to freely write to the returned `Writer` until `unlockStderrWriter` is called.
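As an illustration, a hedged sketch of pausing progress rendering while printing a diagnostic (this assumes `lockStdErr`/`unlockStdErr` as listed above; the helper name, the message, and the `std.io.getStdErr` writer are illustrative, and the exact stderr-writer surface varies across Zig versions):

const std = @import("std");

// Illustrative helper: hold the stderr lock so the progress renderer
// cannot interleave its redraws with our diagnostic output.
fn reportWarning(msg: []const u8) void {
    std.Progress.lockStdErr();
    defer std.Progress.unlockStdErr();
    // Write to stderr directly while the lock is held; avoid helpers
    // that acquire the same non-reentrant lock internally, which would
    // deadlock.
    const stderr = std.io.getStdErr();
    stderr.writeAll("warning: ") catch {};
    stderr.writeAll(msg) catch {};
    stderr.writeAll("\n") catch {};
}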

Values

have_ipc
= switch (builtin.os.tag) { .wasi, .freestanding, .windows => false, else => true, }

Source

pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node {
    if (noop_impl) {
        assert(node.index == .none);
        return Node.none;
    }
    const node_index = node.index.unwrap() orelse return Node.none;
    const parent = node_index.toParent();

    const freelist = &global_progress.node_freelist;
    var old_freelist = @atomicLoad(Freelist, freelist, .acquire); // acquire to ensure we have the correct "next" entry
    while (old_freelist.head.unwrap()) |free_index| {
        const next_ptr = freelistNextByIndex(free_index);
        const new_freelist: Freelist = .{
            .head = @atomicLoad(Node.OptionalIndex, next_ptr, .monotonic),
            // We don't need to increment the generation when removing nodes from the free list,
            // only when adding them. (This choice is arbitrary; the opposite would also work.)
            .generation = old_freelist.generation,
        };
        old_freelist = @cmpxchgWeak(
            Freelist,
            freelist,
            old_freelist,
            new_freelist,
            .acquire, // not theoretically necessary, but not allowed to be weaker than the failure order
            .acquire, // ensure we have the correct `node_freelist_next` entry on the next iteration
        ) orelse {
            // We won the allocation race.
            return init(free_index, parent, name, estimated_total_items);
        };
    }

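    // Freelist is empty: bump-allocate a fresh slot from the shared node storage.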
    const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic);
    if (free_index >= global_progress.node_storage.len) {
        // Ran out of node storage memory. Progress for this node will not be tracked.
        _ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic);
        return Node.none;
    }

    return init(@enumFromInt(free_index), parent, name, estimated_total_items);
}
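
Two details are worth noting in the freelist pop above. The `generation` counter guards against the ABA hazard: because generations advance only when nodes are pushed back onto the list, a head that was popped and re-pushed by another thread no longer compares equal, so the compare-exchange cannot succeed against stale state. And allocation degrades gracefully: when the freelist is empty, `start` bump-allocates from the fixed `node_storage` array and, once that is exhausted, returns the no-op `Node.none` instead of failing.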