Compare commits


No commits in common. "main" and "v0.2.3" have entirely different histories.
main...v0.2.3

13 changed files with 158 additions and 144 deletions


@@ -13,8 +13,6 @@ jobs:
       steps:
         - uses: actions/checkout@v4
         - uses: mlugg/setup-zig@v2
-          with:
-            version: 0.15.2
         - run: zig build test
         - run: zig fmt --check src/
@@ -30,16 +28,12 @@ jobs:
       steps:
         - uses: actions/checkout@v4
         - uses: mlugg/setup-zig@v2
-          with:
-            version: 0.15.2
         - name: Build ${{ matrix.target }} (${{ matrix.optimize }})
           run: |
             zig build --release=${{ matrix.optimize == 'ReleaseSafe' && 'safe' || 'off' }} -Dtarget=${{ matrix.target }}
-        - name: Test help command
+        - name: Verify binary
           if: matrix.target == 'x86_64-linux'
-          run: ./zig-out/bin/fast-cli --help
-        - name: Check binary type
-          if: matrix.target == 'x86_64-linux'
-          run: file zig-out/bin/fast-cli
+          run: |
+            ./zig-out/bin/fast-cli --help
+            file zig-out/bin/fast-cli


@@ -19,11 +19,9 @@ jobs:
       steps:
         - uses: actions/checkout@v4
        - uses: mlugg/setup-zig@v2
-          with:
-            version: 0.15.2
         - name: Build
-          run: zig build -Doptimize=ReleaseFast -Dtarget=${{ matrix.target }} -Dcpu=baseline
+          run: zig build --release=safe -Dtarget=${{ matrix.target }}
         - name: Prepare artifact
           run: |
@@ -41,8 +39,6 @@ jobs:
       steps:
         - uses: actions/checkout@v4
         - uses: mlugg/setup-zig@v2
-          with:
-            version: 0.15.2
         - run: zig build test

   release:


@@ -6,7 +6,7 @@
 A blazingly fast CLI tool for testing internet speed uses fast.com v2 api. Written in Zig for maximum performance.

-**1.2 MB binary** • 🚀 **Zero runtime deps** • 📊 **Smart stability detection**
+**1.4 MiB binary** • 🚀 **Zero runtime deps** • 📊 **Smart stability detection**

 ## Demo
@@ -14,7 +14,7 @@ A blazingly fast CLI tool for testing internet speed uses fast.com v2 api. Writt
 ## Why fast-cli?

-- **Tiny binary**: Just 1.2 MB, no runtime dependencies
+- **Tiny binary**: Just 1.2 MiB, no runtime dependencies
 - **Blazing fast**: Concurrent connections with adaptive chunk sizing
 - **Cross-platform**: Single binary for Linux, macOS
 - **Smart stopping**: Uses Coefficient of Variation (CoV) algorithm for adaptive test duration
@@ -89,8 +89,7 @@ zig build
 zig build test

 # Release build
-# Consider removing -Dcpu if you do not need a portable build
-zig build -Doptimize=ReleaseFast -Dcpu=baseline
+zig build --release=safe
 ```

 ## License


@@ -4,31 +4,57 @@ pub fn build(b: *std.Build) void {
     const target = b.standardTargetOptions(.{});
     const optimize = b.standardOptimizeOption(.{});

-    const dep_zli = b.dependency("zli", .{ .target = target });
-    const dep_mvzr = b.dependency("mvzr", .{ .target = target, .optimize = optimize });
+    // library tests
+    const library_tests = b.addTest(.{
+        .root_source_file = b.path("src/test.zig"),
+        .target = target,
+        .optimize = optimize,
+    });
+    const run_library_tests = b.addRunArtifact(library_tests);
+    const test_step = b.step("test", "Run all tests");
+    test_step.dependOn(&run_library_tests.step);
+
+    const dep_zli = b.dependency("zli", .{
+        .target = target,
+    });
+    const mod_zli = dep_zli.module("zli");
+
+    const dep_mvzr = b.dependency("mvzr", .{
+        .target = target,
+        .optimize = optimize,
+    });
+    const mod_mvzr = dep_mvzr.module("mvzr");

+    // Create build options for version info
     const build_options = b.addOptions();
+    // Read version from build.zig.zon at compile time
     const build_zon_content = @embedFile("build.zig.zon");
     const version = blk: {
+        // Simple parsing to extract version string
         const start = std.mem.indexOf(u8, build_zon_content, ".version = \"") orelse unreachable;
         const version_start = start + ".version = \"".len;
         const end = std.mem.indexOfPos(u8, build_zon_content, version_start, "\"") orelse unreachable;
         break :blk build_zon_content[version_start..end];
     };
     build_options.addOption([]const u8, "version", version);

     const exe = b.addExecutable(.{
         .name = "fast-cli",
-        .root_module = b.createModule(.{
-            .root_source_file = b.path("src/main.zig"),
-            .target = target,
-            .optimize = optimize,
-        }),
+        .root_source_file = b.path("src/main.zig"),
+        .target = target,
+        .optimize = optimize,
+        .strip = optimize != .Debug,
     });

-    exe.root_module.addImport("zli", dep_zli.module("zli"));
-    exe.root_module.addImport("mvzr", dep_mvzr.module("mvzr"));
+    exe.root_module.addImport("zli", mod_zli);
+    exe.root_module.addImport("mvzr", mod_mvzr);
     exe.root_module.addImport("build_options", build_options.createModule());
+    library_tests.root_module.addImport("mvzr", mod_mvzr);

+    // Link against the static library instead
     b.installArtifact(exe);
@@ -41,15 +67,5 @@ pub fn build(b: *std.Build) void {
     const run_step = b.step("run", "Run the app");
     run_step.dependOn(&run_cmd.step);

-    const tests = b.addTest(.{
-        .root_module = b.createModule(.{
-            .root_source_file = b.path("src/test.zig"),
-            .target = target,
-            .optimize = optimize,
-        }),
-    });
-    tests.root_module.addImport("mvzr", dep_mvzr.module("mvzr"));
-    const test_step = b.step("test", "Run tests");
-    test_step.dependOn(&b.addRunArtifact(tests).step);
+    // b.default_step.dependOn(test_step); // Disabled for cross-compilation
 }
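Both branches expose the extracted version string to the program through `exe.root_module.addImport("build_options", build_options.createModule())`. For readers unfamiliar with the pattern, a minimal sketch of how application code consumes that generated module; the `printVersion` helper is illustrative and only compiles when build.zig wires the module in, as both versions above do:

```zig
const std = @import("std");
// The module name matches the string passed to addImport in build.zig.
const build_options = @import("build_options");

/// Illustrative helper: prints the version that build.zig extracted
/// from build.zig.zon at compile time.
pub fn printVersion(writer: anytype) !void {
    try writer.print("fast-cli {s}\n", .{build_options.version});
}
```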


@@ -1,20 +1,20 @@
 .{
     .name = .fast_cli,
-    .version = "0.2.4",
-    .fingerprint = 0xfb5a9fbee5075971,
-    .minimum_zig_version = "0.15.1",
+    .version = "0.2.3",
+    .fingerprint = 0xfb5a9fbee5075971, // Changing this has security and trust implications.
+    .minimum_zig_version = "0.14.0",
     .dependencies = .{
-        .zli = .{
-            .url = "https://github.com/xcaeser/zli/archive/v4.1.1.tar.gz",
-            .hash = "zli-4.1.1-LeUjpljfAAAak_E3L4NPowuzPs_FUF9-jYyxuTSNSthM",
-        },
         .mvzr = .{
-            .url = "https://github.com/mnemnion/mvzr/archive/refs/tags/v0.3.7.tar.gz",
-            .hash = "mvzr-0.3.7-ZSOky5FtAQB2VrFQPNbXHQCFJxWTMAYEK7ljYEaMR6jt",
+            .url = "https://github.com/mnemnion/mvzr/archive/refs/tags/v0.3.3.tar.gz",
+            .hash = "mvzr-0.3.2-ZSOky95lAQA00lXTN_g8JWoBuh8pw-jyzmCWAqlu1h8L",
+        },
+        .zli = .{
+            .url = "https://github.com/xcaeser/zli/archive/v3.7.0.tar.gz",
+            .hash = "zli-3.7.0-LeUjpq8uAQCl8uh-ws3jdXsnbCwMZQgcZQx4TVXHLSeQ",
         },
     },
     .paths = .{


@@ -1,9 +1,6 @@
 const std = @import("std");
 const zli = @import("zli");
 const builtin = @import("builtin");
-const Writer = std.Io.Writer;
-const log = std.log.scoped(.cli);

 const Fast = @import("../lib/fast.zig").Fast;
 const HTTPSpeedTester = @import("../lib/http_speed_tester_v2.zig").HTTPSpeedTester;
@@ -14,6 +11,7 @@ const BandwidthMeter = @import("../lib/bandwidth.zig");
 const SpeedMeasurement = @import("../lib/bandwidth.zig").SpeedMeasurement;
 const progress = @import("../lib/progress.zig");
 const HttpLatencyTester = @import("../lib/http_latency_tester.zig").HttpLatencyTester;
+const log = std.log.scoped(.cli);

 const https_flag = zli.Flag{
     .name = "https",
@@ -46,8 +44,8 @@ const max_duration_flag = zli.Flag{
     .default_value = .{ .Int = 30 },
 };

-pub fn build(writer: *Writer, allocator: std.mem.Allocator) !*zli.Command {
-    const root = try zli.Command.init(writer, allocator, .{
+pub fn build(allocator: std.mem.Allocator) !*zli.Command {
+    const root = try zli.Command.init(allocator, .{
         .name = "fast-cli",
         .description = "Estimate connection speed using fast.com",
         .version = null,
@@ -67,8 +65,6 @@ fn run(ctx: zli.CommandContext) !void {
     const json_output = ctx.flag("json", bool);
     const max_duration = ctx.flag("duration", i64);

-    const spinner = ctx.spinner;
-
     log.info("Config: https={}, upload={}, json={}, max_duration={}s", .{
         use_https, check_upload, json_output, max_duration,
     });
@@ -78,20 +74,20 @@ fn run(ctx: zli.CommandContext) !void {
     const urls = fast.get_urls(5) catch |err| {
         if (!json_output) {
-            try spinner.fail("Failed to get URLs: {}", .{err});
+            try ctx.spinner.fail("Failed to get URLs: {}", .{err});
         } else {
             const error_msg = switch (err) {
                 error.ConnectionTimeout => "Failed to contact fast.com servers",
                 else => "Failed to get URLs",
             };
-            try outputJson(ctx.writer, null, null, null, error_msg);
+            try outputJson(null, null, null, error_msg);
         }
         return;
     };

-    log.info("Got {} URLs\n", .{urls.len});
+    log.info("Got {} URLs", .{urls.len});
     for (urls) |url| {
-        log.info("URL: {s}\n", .{url});
+        log.debug("URL: {s}", .{url});
     }

     // Measure latency first
@@ -99,9 +95,9 @@ fn run(ctx: zli.CommandContext) !void {
     defer latency_tester.deinit();

     const latency_ms = if (!json_output) blk: {
-        try spinner.start("Measuring latency...", .{});
+        try ctx.spinner.start(.{}, "Measuring latency...", .{});
         const result = latency_tester.measureLatency(urls) catch |err| {
-            try spinner.fail("Latency test failed: {}", .{err});
+            log.err("Latency test failed: {}", .{err});
             break :blk null;
         };
         break :blk result;
@@ -110,7 +106,7 @@ fn run(ctx: zli.CommandContext) !void {
     };

     if (!json_output) {
-        log.info("Measuring download speed...", .{});
+        try ctx.spinner.start(.{}, "Measuring download speed...", .{});
     }

     // Initialize speed tester
@@ -130,15 +126,15 @@ fn run(ctx: zli.CommandContext) !void {
     const download_result = if (json_output) blk: {
         // JSON mode: clean output only
         break :blk speed_tester.measure_download_speed_stability(urls, criteria) catch |err| {
-            try spinner.fail("Download test failed: {}", .{err});
-            try outputJson(ctx.writer, null, null, null, "Download test failed");
+            log.err("Download test failed: {}", .{err});
+            try outputJson(null, null, null, "Download test failed");
             return;
         };
     } else blk: {
         // Interactive mode with spinner updates
-        const progressCallback = progress.createCallback(spinner, updateSpinnerText);
+        const progressCallback = progress.createCallback(ctx.spinner, updateSpinnerText);
         break :blk speed_tester.measureDownloadSpeedWithStabilityProgress(urls, criteria, progressCallback) catch |err| {
-            try spinner.fail("Download test failed: {}", .{err});
+            try ctx.spinner.fail("Download test failed: {}", .{err});
             return;
         };
     };
@@ -146,21 +142,21 @@ fn run(ctx: zli.CommandContext) !void {
     var upload_result: ?SpeedTestResult = null;
     if (check_upload) {
         if (!json_output) {
-            log.info("Measuring upload speed...", .{});
+            try ctx.spinner.start(.{}, "Measuring upload speed...", .{});
         }

         upload_result = if (json_output) blk: {
             // JSON mode: clean output only
             break :blk speed_tester.measure_upload_speed_stability(urls, criteria) catch |err| {
-                try spinner.fail("Upload test failed: {}", .{err});
-                try outputJson(ctx.writer, download_result.speed.value, latency_ms, null, "Upload test failed");
+                log.err("Upload test failed: {}", .{err});
+                try outputJson(download_result.speed.value, latency_ms, null, "Upload test failed");
                 return;
             };
         } else blk: {
             // Interactive mode with spinner updates
-            const uploadProgressCallback = progress.createCallback(spinner, updateUploadSpinnerText);
+            const uploadProgressCallback = progress.createCallback(ctx.spinner, updateUploadSpinnerText);
             break :blk speed_tester.measureUploadSpeedWithStabilityProgress(urls, criteria, uploadProgressCallback) catch |err| {
-                try spinner.fail("Upload test failed: {}", .{err});
+                try ctx.spinner.fail("Upload test failed: {}", .{err});
                 return;
             };
         };
@@ -170,34 +166,36 @@ fn run(ctx: zli.CommandContext) !void {
     if (!json_output) {
         if (latency_ms) |ping| {
             if (upload_result) |up| {
-                try spinner.succeed("🏓 {d:.0}ms | ⬇️ Download: {d:.1} {s} | ⬆️ Upload: {d:.1} {s}", .{ ping, download_result.speed.value, download_result.speed.unit.toString(), up.speed.value, up.speed.unit.toString() });
+                try ctx.spinner.succeed("🏓 {d:.0}ms | ⬇️ Download: {d:.1} {s} | ⬆️ Upload: {d:.1} {s}", .{ ping, download_result.speed.value, download_result.speed.unit.toString(), up.speed.value, up.speed.unit.toString() });
             } else {
-                try spinner.succeed("🏓 {d:.0}ms | ⬇️ Download: {d:.1} {s}", .{ ping, download_result.speed.value, download_result.speed.unit.toString() });
+                try ctx.spinner.succeed("🏓 {d:.0}ms | ⬇️ Download: {d:.1} {s}", .{ ping, download_result.speed.value, download_result.speed.unit.toString() });
             }
         } else {
             if (upload_result) |up| {
-                try spinner.succeed("⬇️ Download: {d:.1} {s} | ⬆️ Upload: {d:.1} {s}", .{ download_result.speed.value, download_result.speed.unit.toString(), up.speed.value, up.speed.unit.toString() });
+                try ctx.spinner.succeed("⬇️ Download: {d:.1} {s} | ⬆️ Upload: {d:.1} {s}", .{ download_result.speed.value, download_result.speed.unit.toString(), up.speed.value, up.speed.unit.toString() });
             } else {
-                try spinner.succeed("⬇️ Download: {d:.1} {s}", .{ download_result.speed.value, download_result.speed.unit.toString() });
+                try ctx.spinner.succeed("⬇️ Download: {d:.1} {s}", .{ download_result.speed.value, download_result.speed.unit.toString() });
             }
         }
     } else {
         const upload_speed = if (upload_result) |up| up.speed.value else null;
-        try outputJson(ctx.writer, download_result.speed.value, latency_ms, upload_speed, null);
+        try outputJson(download_result.speed.value, latency_ms, upload_speed, null);
     }
 }

 /// Update spinner text with current speed measurement
 fn updateSpinnerText(spinner: anytype, measurement: SpeedMeasurement) void {
-    spinner.updateMessage("⬇️ {d:.1} {s}", .{ measurement.value, measurement.unit.toString() }) catch {};
+    spinner.updateText("⬇️ {d:.1} {s}", .{ measurement.value, measurement.unit.toString() }) catch {};
 }

 /// Update spinner text with current upload speed measurement
 fn updateUploadSpinnerText(spinner: anytype, measurement: SpeedMeasurement) void {
-    spinner.updateMessage("⬆️ {d:.1} {s}", .{ measurement.value, measurement.unit.toString() }) catch {};
+    spinner.updateText("⬆️ {d:.1} {s}", .{ measurement.value, measurement.unit.toString() }) catch {};
 }

-fn outputJson(writer: *Writer, download_mbps: ?f64, ping_ms: ?f64, upload_mbps: ?f64, error_message: ?[]const u8) !void {
+fn outputJson(download_mbps: ?f64, ping_ms: ?f64, upload_mbps: ?f64, error_message: ?[]const u8) !void {
+    const stdout = std.io.getStdOut().writer();
     var download_buf: [32]u8 = undefined;
     var ping_buf: [32]u8 = undefined;
     var upload_buf: [32]u8 = undefined;
@@ -208,5 +206,5 @@ fn outputJson(writer: *Writer, download_mbps: ?f64, ping_ms: ?f64, upload_mbps:
     const upload_str = if (upload_mbps) |u| try std.fmt.bufPrint(&upload_buf, "{d:.1}", .{u}) else "null";
     const error_str = if (error_message) |e| try std.fmt.bufPrint(&error_buf, "\"{s}\"", .{e}) else "null";

-    try writer.print("{{\"download_mbps\": {s}, \"ping_ms\": {s}, \"upload_mbps\": {s}, \"error\": {s}}}\n", .{ download_str, ping_str, upload_str, error_str });
+    try stdout.print("{{\"download_mbps\": {s}, \"ping_ms\": {s}, \"upload_mbps\": {s}, \"error\": {s}}}\n", .{ download_str, ping_str, upload_str, error_str });
 }
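For reference, the single line emitted in `--json` mode has the shape encoded by the format string in `outputJson` above. A small self-contained sketch of that shape; the numeric values are placeholders, not real measurements:

```zig
const std = @import("std");

// Reproduces the --json output shape from outputJson's format string.
// Optional fields that are absent are rendered as the literal text "null".
test "json output line shape (illustrative values)" {
    var buf: [128]u8 = undefined;
    const line = try std.fmt.bufPrint(
        &buf,
        "{{\"download_mbps\": {s}, \"ping_ms\": {s}, \"upload_mbps\": {s}, \"error\": {s}}}",
        .{ "94.2", "18.3", "null", "null" },
    );
    try std.testing.expect(std.mem.indexOf(u8, line, "\"upload_mbps\": null") != null);
}
```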


@ -107,7 +107,7 @@ test "BandwidthMeter bandwidth calculation" {
meter.update_total(1000); // 1000 bytes meter.update_total(1000); // 1000 bytes
// Sleep briefly to ensure time passes // Sleep briefly to ensure time passes
std.Thread.sleep(std.time.ns_per_ms * 10); // 10ms std.time.sleep(std.time.ns_per_ms * 10); // 10ms
const bw = meter.bandwidth(); const bw = meter.bandwidth();
try testing.expect(bw > 0); try testing.expect(bw > 0);
@ -127,7 +127,7 @@ test "BandwidthMeter unit conversion" {
// Test different speed ranges // Test different speed ranges
meter._bytes_transferred = 1000; meter._bytes_transferred = 1000;
meter._timer = try std.time.Timer.start(); meter._timer = try std.time.Timer.start();
std.Thread.sleep(std.time.ns_per_s); // 1 second std.time.sleep(std.time.ns_per_s); // 1 second
const measurement = meter.bandwidthWithUnits(); const measurement = meter.bandwidthWithUnits();
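The tests above only assert that `BandwidthMeter.bandwidth()` returns something positive after a known transfer and a short sleep; the meter itself is not part of this comparison. The underlying arithmetic is just bytes over elapsed time, roughly as in this sketch (assuming a `std.time.Timer`-based implementation, as the `_timer` field suggests):

```zig
const std = @import("std");

// Rough sketch only: bytes transferred divided by elapsed seconds,
// expressed in bits per second. Not the repo's BandwidthMeter code.
test "bandwidth from bytes and elapsed time" {
    var timer = try std.time.Timer.start();
    const bytes_transferred: u64 = 1000;
    std.Thread.sleep(10 * std.time.ns_per_ms); // let some time pass
    const elapsed_s = @as(f64, @floatFromInt(timer.read())) / std.time.ns_per_s;
    const bits_per_second = @as(f64, @floatFromInt(bytes_transferred)) * 8.0 / elapsed_s;
    try std.testing.expect(bits_per_second > 0);
}
```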


@@ -71,7 +71,7 @@ pub const Fast = struct {
         var result = try Fast.parse_response_urls(json_data.items, allocator);
-        return result.toOwnedSlice(allocator);
+        return result.toOwnedSlice();
     }

     /// Sanitizes JSON data by replacing invalid UTF-8 bytes that cause parseFromSlice to fail.
@@ -102,7 +102,7 @@
     }

     fn parse_response_urls(json_data: []const u8, result_allocator: std.mem.Allocator) !std.ArrayList([]const u8) {
-        var result = std.ArrayList([]const u8).empty;
+        var result = std.ArrayList([]const u8).init(result_allocator);
         const sanitized_json = try sanitize_json(json_data, result_allocator);
         defer result_allocator.free(sanitized_json);
@@ -119,7 +119,7 @@
         for (response.targets) |target| {
             const url_copy = try result_allocator.dupe(u8, target.url);
-            try result.append(result_allocator, url_copy);
+            try result.append(url_copy);
         }

         return result;
@@ -155,13 +155,13 @@
     }

     fn get_page(self: *Fast, allocator: std.mem.Allocator, url: []const u8) !std.ArrayList(u8) {
-        var response_body = std.Io.Writer.Allocating.init(allocator);
+        _ = allocator;
+        var response_body = std.ArrayList(u8).init(self.arena.allocator());

         const response: http.Client.FetchResult = self.client.fetch(.{
             .method = .GET,
             .location = .{ .url = url },
-            .response_writer = &response_body.writer,
-            // .response_storage = .{ .dynamic = &response_body },
+            .response_storage = .{ .dynamic = &response_body },
         }) catch |err| switch (err) {
             error.NetworkUnreachable, error.ConnectionRefused => {
                 log.err("Failed to reach fast.com servers (network/connection error) for URL: {s}", .{url});
@@ -195,7 +195,7 @@
             log.err("HTTP request failed with status code {}", .{response.status});
             return error.HttpRequestFailed;
         }

-        return response_body.toArrayList();
+        return response_body;
     }
 };
@ -205,12 +205,12 @@ test "parse_response_urls_v2" {
; ;
const allocator = testing.allocator; const allocator = testing.allocator;
var urls = try Fast.parse_response_urls(response, allocator); const urls = try Fast.parse_response_urls(response, allocator);
defer { defer {
for (urls.items) |url| { for (urls.items) |url| {
allocator.free(url); allocator.free(url);
} }
urls.deinit(allocator); urls.deinit();
} }
try testing.expect(urls.items.len == 2); try testing.expect(urls.items.len == 2);
@ -275,12 +275,12 @@ test "parse_response_without_isp" {
; ;
const allocator = testing.allocator; const allocator = testing.allocator;
var urls = try Fast.parse_response_urls(response, allocator); const urls = try Fast.parse_response_urls(response, allocator);
defer { defer {
for (urls.items) |url| { for (urls.items) |url| {
allocator.free(url); allocator.free(url);
} }
urls.deinit(allocator); urls.deinit();
} }
try testing.expect(urls.items.len == 1); try testing.expect(urls.items.len == 1);
@ -293,12 +293,12 @@ test "parse_response_minimal_client" {
; ;
const allocator = testing.allocator; const allocator = testing.allocator;
var urls = try Fast.parse_response_urls(response, allocator); const urls = try Fast.parse_response_urls(response, allocator);
defer { defer {
for (urls.items) |url| { for (urls.items) |url| {
allocator.free(url); allocator.free(url);
} }
urls.deinit(allocator); urls.deinit();
} }
try testing.expect(urls.items.len == 1); try testing.expect(urls.items.len == 1);
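A large share of the hunks in this comparison, here and in the latency, strategy, and worker diffs below, are the same mechanical change: main targets Zig 0.15, where `std.ArrayList` is unmanaged and takes an allocator on each call, while v0.2.3 targets Zig 0.14's managed list. A minimal sketch of the main-branch style, with the v0.2.3 equivalents noted in comments:

```zig
const std = @import("std");

// Zig 0.15 (main) style: the list does not store an allocator.
test "unmanaged ArrayList usage" {
    const allocator = std.testing.allocator;

    var list: std.ArrayList(u8) = .empty; // 0.14 (v0.2.3): std.ArrayList(u8).init(allocator)
    defer list.deinit(allocator); // 0.14: list.deinit()

    try list.append(allocator, 42); // 0.14: list.append(42)
    try std.testing.expectEqual(@as(usize, 1), list.items.len);
}
```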


@@ -1,6 +1,5 @@
 const std = @import("std");
 const http = std.http;
-const log = std.log.scoped(.cli);

 pub const HttpLatencyTester = struct {
     allocator: std.mem.Allocator,
@@ -19,22 +18,16 @@
     /// Measure latency to multiple URLs using HEAD requests
     /// Returns median latency in milliseconds, or null if all requests failed
-    /// Zig's http client seems to be ~20ms slower than curl.
-    /// Let's not worry about that misreporting for now
     pub fn measureLatency(self: *Self, urls: []const []const u8) !?f64 {
         if (urls.len == 0) return null;

-        var latencies: std.ArrayList(f64) = .{};
-        defer latencies.deinit(self.allocator);
+        var latencies = std.ArrayList(f64).init(self.allocator);
+        defer latencies.deinit();

-        // HTTP client for all requests
-        var client = http.Client{ .allocator = self.allocator };
-        defer client.deinit();
-
         // Test each URL
         for (urls) |url| {
-            if (self.measureSingleUrl(url, &client)) |latency_ms| {
-                try latencies.append(self.allocator, latency_ms);
+            if (self.measureSingleUrl(url)) |latency_ms| {
+                try latencies.append(latency_ms);
             } else |_| {
                 // Ignore errors, continue with other URLs
                 continue;
@@ -43,26 +36,50 @@
         if (latencies.items.len == 0) return null;

-        log.info("Latencies: {any}", .{latencies.items});
-
         // Return median latency
         return self.calculateMedian(latencies.items);
     }

-    /// Measure latency to a single URL using HEAD request
-    fn measureSingleUrl(self: *Self, url: []const u8, client: *http.Client) !f64 {
-        _ = self;
+    /// Measure latency to a single URL using connection reuse method
+    /// First request establishes HTTPS connection, second request measures pure RTT
+    fn measureSingleUrl(self: *Self, url: []const u8) !f64 {
+        var client = http.Client{ .allocator = self.allocator };
+        defer client.deinit();
+
         // Parse URL
         const uri = try std.Uri.parse(url);

-        // Measure request/response timing
+        // First request: Establish HTTPS connection (ignore timing)
+        {
+            const server_header_buffer = try self.allocator.alloc(u8, 4096);
+            defer self.allocator.free(server_header_buffer);
+            var req = try client.open(.HEAD, uri, .{
+                .server_header_buffer = server_header_buffer,
+            });
+            defer req.deinit();
+            try req.send();
+            try req.finish();
+            try req.wait();
+        }
+
+        // Second request: Reuse connection and measure pure HTTP RTT
         const start_time = std.time.nanoTimestamp();
-        _ = try client.fetch(.{
-            .method = .HEAD,
-            .location = .{ .uri = uri },
-        });
+        {
+            const server_header_buffer = try self.allocator.alloc(u8, 4096);
+            defer self.allocator.free(server_header_buffer);
+            var req = try client.open(.HEAD, uri, .{
+                .server_header_buffer = server_header_buffer,
+            });
+            defer req.deinit();
+            try req.send();
+            try req.finish();
+            try req.wait();
+        }
         const end_time = std.time.nanoTimestamp();
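`calculateMedian` is referenced above but not included in this comparison. A plausible stand-in, assuming it sorts the samples and takes the middle value (averaging the two middle samples for even counts); the repo's actual implementation may differ:

```zig
const std = @import("std");

/// Hypothetical stand-in for the calculateMedian helper referenced above.
/// Sorts the slice in place; callers guarantee samples.len > 0 (see the
/// len == 0 guard in measureLatency).
fn median(samples: []f64) f64 {
    std.mem.sort(f64, samples, {}, std.sort.asc(f64));
    const mid = samples.len / 2;
    if (samples.len % 2 == 1) return samples[mid];
    return (samples[mid - 1] + samples[mid]) / 2.0;
}

test "median of latency samples" {
    var samples = [_]f64{ 42.0, 17.5, 23.0 };
    try std.testing.expectEqual(@as(f64, 23.0), median(&samples));
}
```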


@@ -154,7 +154,7 @@ pub const HTTPSpeedTester = struct {
         // Main measurement loop
         while (strategy.shouldContinue(timer.timer_interface().read())) {
-            std.Thread.sleep(strategy.getSleepInterval());
+            std.time.sleep(strategy.getSleepInterval());

             if (has_progress) {
                 const current_bytes = worker_manager.getCurrentDownloadBytes(workers);
@@ -221,7 +221,7 @@ pub const HTTPSpeedTester = struct {
         // Main measurement loop
         while (strategy.shouldContinue(timer.timer_interface().read())) {
-            std.Thread.sleep(strategy.getSleepInterval());
+            std.time.sleep(strategy.getSleepInterval());

             if (has_progress) {
                 const current_bytes = worker_manager.getCurrentUploadBytes(workers);
@@ -285,7 +285,7 @@ pub const HTTPSpeedTester = struct {
         // Main measurement loop
         while (strategy.shouldContinue(timer.timer_interface().read())) {
-            std.Thread.sleep(strategy.getSleepInterval());
+            std.time.sleep(strategy.getSleepInterval());

             const current_bytes = worker_manager.getCurrentDownloadBytes(workers);
@@ -359,7 +359,7 @@ pub const HTTPSpeedTester = struct {
         // Main measurement loop
         while (strategy.shouldContinue(timer.timer_interface().read())) {
-            std.Thread.sleep(strategy.getSleepInterval());
+            std.time.sleep(strategy.getSleepInterval());

             const current_bytes = worker_manager.getCurrentUploadBytes(workers);


@@ -31,7 +31,6 @@ pub const StabilityStrategy = struct {
     last_sample_time: u64 = 0,
     last_total_bytes: u64 = 0,
     consecutive_stable_checks: u32 = 0,
-    allocator: std.mem.Allocator,

     pub fn init(allocator: std.mem.Allocator, criteria: StabilityCriteria) StabilityStrategy {
         return StabilityStrategy{
@@ -39,13 +38,12 @@
             .ramp_up_duration_ns = @as(u64, criteria.ramp_up_duration_seconds) * std.time.ns_per_s,
             .max_duration_ns = @as(u64, criteria.max_duration_seconds) * std.time.ns_per_s,
             .measurement_interval_ns = criteria.measurement_interval_ms * std.time.ns_per_ms,
-            .speed_measurements = std.ArrayList(f64).empty,
-            .allocator = allocator,
+            .speed_measurements = std.ArrayList(f64).init(allocator),
         };
     }

     pub fn deinit(self: *StabilityStrategy) void {
-        self.speed_measurements.deinit(self.allocator);
+        self.speed_measurements.deinit();
     }

     pub fn shouldContinue(self: StabilityStrategy, current_time: u64) bool {
@@ -71,7 +69,7 @@
         // Phase 1: Ramp-up - collect measurements but don't check stability
         if (current_time < self.ramp_up_duration_ns) {
-            try self.speed_measurements.append(self.allocator, interval_speed);
+            try self.speed_measurements.append(interval_speed);

             // Keep sliding window size
             if (self.speed_measurements.items.len > self.criteria.sliding_window_size) {
@@ -79,7 +77,7 @@
             }
         } else {
             // Phase 2: Stabilization - check CoV for stability
-            try self.speed_measurements.append(self.allocator, interval_speed);
+            try self.speed_measurements.append(interval_speed);

             // Maintain sliding window
             if (self.speed_measurements.items.len > self.criteria.sliding_window_size) {

@@ -169,7 +169,7 @@ pub const DownloadWorker = struct {
                     _ = self.error_count.fetchAdd(1, .monotonic);
                     break;
                 }
-                std.Thread.sleep(std.time.ns_per_ms * 100);
+                std.time.sleep(std.time.ns_per_ms * 100);
                 continue;
             };
             defer response.deinit();
@@ -183,7 +183,7 @@
             // Accept both 200 (full content) and 206 (partial content)
             if (response.status != .ok and response.status != .partial_content) {
                 print("Worker {} HTTP error: {}\n", .{ self.config.worker_id, response.status });
-                std.Thread.sleep(std.time.ns_per_ms * 100);
+                std.time.sleep(std.time.ns_per_ms * 100);
                 continue;
             }
@@ -196,7 +196,7 @@
             // Small delay between requests
             if (self.config.delay_between_requests_ms > 0) {
-                std.Thread.sleep(std.time.ns_per_ms * self.config.delay_between_requests_ms);
+                std.time.sleep(std.time.ns_per_ms * self.config.delay_between_requests_ms);
             }
         }
     }
@@ -318,7 +318,7 @@ pub const UploadWorker = struct {
                     _ = self.error_count.fetchAdd(1, .monotonic);
                     break;
                 }
-                std.Thread.sleep(std.time.ns_per_ms * 100);
+                std.time.sleep(std.time.ns_per_ms * 100);
                 continue;
             };
             defer response.deinit();
@@ -331,7 +331,7 @@
             if (response.status != .ok) {
                 print("Upload worker {} HTTP error: {}\n", .{ self.config.worker_id, response.status });
-                std.Thread.sleep(std.time.ns_per_ms * 100);
+                std.time.sleep(std.time.ns_per_ms * 100);
                 continue;
             }
@@ -404,14 +404,15 @@ pub const RealHttpClient = struct {
     fn fetch(ptr: *anyopaque, request: FetchRequest) !FetchResponse {
         const self: *Self = @ptrCast(@alignCast(ptr));

-        var response_body = std.Io.Writer.Allocating.init(self.allocator);
+        var response_body = std.ArrayList(u8).init(self.allocator);
         errdefer response_body.deinit();

         const fetch_options = http.Client.FetchOptions{
             .method = request.method,
             .location = .{ .url = request.url },
             .payload = if (request.payload) |p| p else null,
-            .response_writer = &response_body.writer,
+            .response_storage = .{ .dynamic = &response_body },
+            .max_append_size = request.max_response_size,
         };

         const result = try self.client.fetch(fetch_options);
@@ -468,7 +469,7 @@ pub const MockHttpClient = struct {
     pub fn init(allocator: std.mem.Allocator) Self {
         return Self{
             .allocator = allocator,
-            .responses = std.ArrayList(FetchResponse).empty,
+            .responses = std.ArrayList(FetchResponse).init(allocator),
             .request_count = std.atomic.Value(u32).init(0),
         };
     }
@@ -477,12 +478,12 @@
         for (self.responses.items) |*response| {
             self.allocator.free(response.body);
         }
-        self.responses.deinit(self.allocator);
+        self.responses.deinit();
     }

     pub fn addResponse(self: *Self, status: http.Status, body: []const u8) !void {
         const body_copy = try self.allocator.dupe(u8, body);
-        try self.responses.append(self.allocator, FetchResponse{
+        try self.responses.append(FetchResponse{
             .status = status,
             .body = body_copy,
             .allocator = self.allocator,
@@ -504,7 +505,7 @@
         _ = request;

         if (self.delay_ms > 0) {
-            std.Thread.sleep(std.time.ns_per_ms * self.delay_ms);
+            std.time.sleep(std.time.ns_per_ms * self.delay_ms);
         }

         if (self.should_fail) {
@@ -610,7 +611,7 @@ test "DownloadWorker basic functionality" {
     const thread = try std.Thread.spawn(.{}, DownloadWorker.run, .{&worker});

     // Let it run for a bit
-    std.Thread.sleep(std.time.ns_per_ms * 100);
+    std.time.sleep(std.time.ns_per_ms * 100);

     // Advance timer to trigger stop
     mock_timer.setTime(std.time.ns_per_s * 3); // 3 seconds


@@ -1,5 +1,4 @@
 const std = @import("std");
 const cli = @import("cli/root.zig");

 pub const std_options: std.Options = .{
@@ -12,11 +11,7 @@ pub const std_options: std.Options = .{
 pub fn main() !void {
     const allocator = std.heap.smp_allocator;

-    const file = std.fs.File.stdout();
-    var writer = file.writerStreaming(&.{}).interface;
-
-    const root = try cli.build(&writer, allocator);
+    var root = try cli.build(allocator);
     defer root.deinit();

     try root.execute(.{});