package main
import "core:encoding/json"
import "core:flags"
import "core:fmt"
import "core:os"
import "core:path/filepath"
import "core:slice"
import "core:strings"
import "core:sync/chan"
import "core:sys/posix"
import "core:thread"
import "core:time"
import "core:mem"
import "./vtty"
// Package_Json mirrors the subset of an npm package.json file this tool
// cares about: the package name, the workspace glob rules, and the
// scripts table.
Package_Json :: struct {
	name: string,
	workspaces: []string,
	scripts: map[string]string,
}
IGNORED_FILEPATHS :: []string{"node_modules", ".git", "dist", "out", "tmp", "tmp-output"}
// read_package_json parses the JSON file at `filepath` into a Package_Json
// whose strings are all owned by the caller (cloned onto context.allocator;
// free with destroy_package_json). Returns false when the file cannot be
// read or does not parse.
//
// NOTE(review): the result of os.read_entire_file is checked with
// `err != nil`, which assumes a (data, error) return signature — confirm
// against the core:os version this project builds with.
read_package_json :: proc(filepath: string) -> (Package_Json, bool) {
	package_json: Package_Json
	raw_json, err := os.read_entire_file(filepath, context.allocator)
	if err != nil {
		return package_json, false
	}
	// The raw bytes are only needed while unmarshalling; release on exit.
	defer delete(raw_json)
	// Decode into the temp allocator: the decoded strings are transient and
	// are deep-copied onto the permanent allocator below.
	if err := json.unmarshal(
		raw_json,
		&package_json,
		json.DEFAULT_SPECIFICATION,
		context.temp_allocator,
	); err != nil {
		return {}, false
	}
	// NOTE(review): this wipes the *entire* temp allocator at scope exit,
	// including temp allocations the caller may still hold — confirm no
	// caller relies on temp data surviving this call.
	defer free_all(context.temp_allocator)
	// Clone every decoded string so the returned value outlives the
	// temp-allocator free_all above (defers run after these clones).
	workspaces := make([]string, len(package_json.workspaces))
	for s, i in package_json.workspaces {
		workspaces[i] = strings.clone(s)
	}
	scripts := make(map[string]string)
	for k, v in package_json.scripts {
		scripts[strings.clone(k)] = strings.clone(v)
	}
	return {name = strings.clone(package_json.name), workspaces = workspaces, scripts = scripts},
		true
}
// destroy_package_json releases every allocation owned by a Package_Json
// previously produced by read_package_json: the name, each workspace
// string plus its backing slice, and each scripts key/value plus the map.
destroy_package_json :: proc(package_json: Package_Json) {
	delete(package_json.name)
	for workspace in package_json.workspaces {
		delete(workspace)
	}
	delete(package_json.workspaces)
	for key, value in package_json.scripts {
		delete(key)
		delete(value)
	}
	delete(package_json.scripts)
}
// collect_workspaces walks the current directory for workspace
// package.json files that match the root package.json's `workspaces`
// rules and whose scripts table defines `script`. The returned map is
// keyed by the relative package.json path; keys and values are owned by
// the caller (free with delete(key) / destroy_package_json).
collect_workspaces :: proc(script: string) -> map[string]Package_Json {
	root_package_json, ok := read_package_json("package.json")
	if !ok {
		fmt.printf("failed reading root package.json\n")
		os.exit(-1)
	}
	defer destroy_package_json(root_package_json)
	workspace_rules := root_package_json.workspaces
	package_jsons := make([dynamic]string)
	defer delete(package_jsons)
	// The working directory is loop-invariant; fetch it once instead of
	// re-allocating it on every walker iteration.
	curr_dir, _ := os.get_working_directory(context.allocator)
	defer delete(curr_dir)
	w := filepath.walker_create(".")
	defer filepath.walker_destroy(&w)
	for info in filepath.walker_walk(&w) {
		_, err := os.walker_error(&w)
		if err != nil {
			fmt.printf("error walking %v\n", err)
			os.exit(-1)
		}
		rel_path, _ := filepath.rel(curr_dir, info.fullpath)
		defer delete(rel_path)
		if rel_path == "package.json" {
			// NOTE: Ignore root package.json
			continue
		}
		if slice.contains(IGNORED_FILEPATHS, info.name) {
			continue
		}
		if info.name == "package.json" {
			dir := filepath.dir(rel_path)
			defer delete(dir)
			matched_rule := false
			for r in workspace_rules {
				// A rule either names a directory exactly, or ends in "/*"
				// so that anything under its prefix matches. Checking for
				// the "/*" suffix (instead of any "*" anywhere) also keeps
				// the r[:len(r) - 2] slice in bounds — a bare "*" rule
				// previously sliced with a negative index.
				if (strings.has_suffix(r, "/*") &&
					   strings.has_prefix(rel_path, r[:len(r) - 2])) ||
				   r == dir {
					matched_rule = true
					break
				}
			}
			if !matched_rule {
				continue
			}
			append(&package_jsons, strings.clone(rel_path))
		}
	}
	workspaces := make(map[string]Package_Json)
	for json_path, i in package_jsons {
		package_json, read_ok := read_package_json(json_path)
		if !read_ok {
			// Unreadable package.json: free the cloned path here, since
			// delete(package_jsons) does not free the element strings.
			delete(package_jsons[i])
			continue
		}
		if script in package_json.scripts {
			// Ownership of the cloned path string moves into the map key.
			workspaces[json_path] = package_json
		} else {
			destroy_package_json(package_json)
			delete(package_jsons[i])
		}
	}
	return workspaces
}
// Worker_Data is one unit of work handed to a worker thread: which
// workspace to target and which npm script to invoke. Both strings are
// cloned by the producer and travel through the channel by value.
Worker_Data :: struct {
	workspace: string,
	script: string,
}
// Run_Result reports one finished script run back to the main loop:
// which workspace ran and whether the npm command succeeded.
Run_Result :: struct {
	workspace: string,
	result: bool,
}
// Worker bundles the two channel endpoints a worker thread needs: the
// receive-only work queue and the send-only results queue.
Worker :: struct {
	pool_channel: chan.Chan(Worker_Data, .Recv),
	result_channel: chan.Chan(Run_Result, .Send),
}
// worker_func drains the work queue until it is closed, executing one
// npm script per message and reporting each outcome on result_channel.
worker_func :: proc(w: Worker) {
	for {
		data, ok := chan.recv(w.pool_channel)
		if !ok {
			// Channel closed: no more work will arrive.
			break
		}
		result := execute_npm_script(data.workspace, data.script)
		// The cloned workspace string travels onward inside Run_Result
		// (the receiver owns it from here), but the cloned script string
		// is no longer referenced — free it so each job does not leak one
		// string.
		delete(data.script)
		chan.send(w.result_channel, Run_Result{workspace = data.workspace, result = result})
	}
}
// Npm_Scripts_Worker is the producer thread's payload: the set of
// workspaces to queue, the script name to run, and the send end of the
// work channel. Allocated with new() by main and freed by the producer
// proc when it finishes.
Npm_Scripts_Worker :: struct {
	workspaces: map[string]Package_Json,
	script: string,
	pool_channel: chan.Chan(Worker_Data, .Send),
}
// execute_npm_scripts_worker feeds one Worker_Data per workspace into
// the pool channel, cloning the strings so ownership can pass through
// the channel. `data` is heap-allocated by the caller and freed here
// once all work has been queued (or the channel closes early).
execute_npm_scripts_worker :: proc(data: ^Npm_Scripts_Worker) {
	defer free(data)
	for _, w in data.workspaces {
		workspace := strings.clone(w.name)
		script := strings.clone(data.script)
		if ok := chan.send(
			data.pool_channel,
			Worker_Data{workspace = workspace, script = script},
		); !ok {
			// Failed send means the clones were never delivered; free
			// them here instead of leaking them.
			delete(workspace)
			delete(script)
			break
		}
	}
}
// execute_npm_script shells out to `npm run <script> -w <workspace>
// --if-present` (stderr folded into stdout), drains the child's output,
// and reports whether the command exited with status zero.
execute_npm_script :: proc(workspace, script: string) -> bool {
	command_line := fmt.tprintf("npm run %s -w %s --if-present 2>&1", script, workspace)
	command := strings.clone_to_cstring(command_line)
	defer delete(command)
	pipe := posix.popen(command, "r")
	if pipe == nil {
		fmt.printf("popen failure\n")
		return false
	}
	// Drain the child's output so it cannot block on a full pipe; the
	// text itself is discarded.
	line_buffer: [1024]byte
	for posix.fgets(raw_data(line_buffer[:]), len(line_buffer), pipe) != nil {
		// posix.printf("%s", &line_buffer)
		_ = line_buffer
	}
	// pclose yields -1 on failure and the command status otherwise; the
	// run counts as successful only on a clean zero status (identical to
	// the original -1 / <-1 / >0 cascade).
	exit_status := posix.pclose(pipe)
	return exit_status == 0
}
// Options describes the command-line interface, parsed by core:flags.
Options :: struct {
	// Mandatory first positional argument: the npm script name.
	script: string `args:"pos=0,required" usage:"The npm script to run"`,
	// Root directory to operate from; defaulted to "./" in main.
	root: string `usage:"The root path to use (default ./)"`,
	// Worker-thread count; 0 means "pick automatically" in main.
	concurrent: int `usage:"The number of concurrent scripts to run (defaults to num_cpu - 1)"`,
}
// main parses CLI options, collects workspaces that define the requested
// script, fans execution out over a worker-thread pool, and renders a
// live spinner/progress display until every run has reported back.
main :: proc() {
	when ODIN_DEBUG {
		// Debug builds wrap the allocator so leaks are reported on exit.
		track: mem.Tracking_Allocator
		mem.tracking_allocator_init(&track, context.allocator)
		context.allocator = mem.tracking_allocator(&track)
		defer {
			if len(track.allocation_map) > 0 {
				fmt.eprintf("=== %v allocations not freed: ===\n", len(track.allocation_map))
				for _, entry in track.allocation_map {
					fmt.eprintf("- %v bytes @ %v\n", entry.size, entry.location)
				}
			}
			mem.tracking_allocator_destroy(&track)
		}
	} else {
		// NOTE: Dummy code to let it ignore the mem library
		_ = mem.nil_allocator()
	}
	opt: Options
	style: flags.Parsing_Style = .Odin
	flags.parse_or_exit(&opt, os.args, style)
	if opt.root == "" {
		opt.root = "./"
	}
	if !strings.has_suffix(opt.root, "/") {
		opt.root = fmt.tprintf("%s/", opt.root)
	}
	if opt.concurrent == 0 {
		// Leave one CPU for the main thread; fall back to a small fixed
		// pool when only one CPU is reported.
		num_cpus := get_hardware_concurrency()
		opt.concurrent = num_cpus - 1 if num_cpus > 1 else 4
	}
	os.set_working_directory(opt.root)
	console: vtty.Console
	vtty.create(&console)
	defer {
		// Restore terminal formatting and cursor on every exit path.
		vtty.write(&console, vtty.RESET_FORMAT)
		vtty.write(&console, vtty.CURSOR_SHOW)
		vtty.destroy(&console)
	}
	vtty.write(&console, vtty.CURSOR_HIDE)
	when ODIN_OS == .Windows {
		// NOTE(review): `windows` (core:sys/windows) is not imported in
		// this file — confirm this branch actually compiles on Windows.
		windows.SetConsoleMode(
			windows.GetStdHandle(windows.STD_OUTPUT_HANDLE),
			windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING,
		)
		windows.SetConsoleOutputCP(windows.CP_UTF8)
	}
	workspaces := collect_workspaces(opt.script)
	if len(workspaces) == 0 {
		fmt.printf("Nothing to do (everything filtered?)\n")
		os.exit(-1)
	}
	defer {
		for k, w in workspaces {
			destroy_package_json(w)
			delete(k)
		}
		delete(workspaces)
	}
	// NOTE(review): the `- 1` here (and in the progress loop below)
	// assumes the root package.json is counted in `workspaces`, but
	// collect_workspaces already excludes it — verify the intended total,
	// otherwise one result is never waited for.
	fmt.printf(
		"Running \"%s\" over %d workspaces using %d workers\n",
		opt.script,
		len(workspaces) - 1,
		opt.concurrent,
	)
	start := time.tick_now()
	pool_channel, _ := chan.create(chan.Chan(Worker_Data), context.allocator)
	defer chan.destroy(pool_channel)
	result_channel, _ := chan.create_buffered(chan.Chan(Run_Result), 100, context.allocator)
	defer chan.destroy(result_channel)
	for _ in 0 ..< opt.concurrent {
		thread.run_with_poly_data(
			Worker {
				pool_channel = chan.as_recv(pool_channel),
				result_channel = chan.as_send(result_channel),
			},
			worker_func,
		)
	}
	// The producer thread owns this allocation and frees it when done.
	npm_scripts_data := new(Npm_Scripts_Worker)
	npm_scripts_data^ = Npm_Scripts_Worker {
		workspaces = workspaces,
		script = opt.script,
		pool_channel = chan.as_send(pool_channel),
	}
	thread.run_with_poly_data(npm_scripts_data, execute_npm_scripts_worker)
	current_glyph := 0
	spinner := []rune{'ᚠ', 'ᚢ', 'ᚦ', 'ᚨ', 'ᚱ', 'ᚲ', 'ᚷ', 'ᚹ'}
	has_error: bool
	finished := 0
	for {
		result, ok := chan.try_recv(result_channel)
		if ok {
			finished += 1
			// BUGFIX: was `has_error = has_error if result.result else
			// false`, which can never set the flag (it starts false and
			// is only ever kept or reset to false), so "(with errors)"
			// was unreachable. A failed run must latch the flag to true.
			if !result.result {
				has_error = true
			}
			fmt.printf(
				"\r%s %s (%d/%d)\n",
				"🟢" if result.result else "🔴",
				result.workspace,
				finished,
				len(workspaces) - 1,
			)
			if finished >= len(workspaces) - 1 {
				break
			}
		}
		fmt.printf("\r%c", spinner[current_glyph])
		current_glyph = (current_glyph + 1) % len(spinner)
		time.sleep(time.Millisecond * 100)
	}
	buf: [time.MIN_HMS_LEN]u8
	fmt.printf(
		"Finished%s in %s!\n",
		" (with errors)" if has_error else "",
		time.to_string_hms(time.tick_since(start), buf[:]),
	)
}