From 8de653bf3a5d31827c88724115abc925554bb884 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Wed, 27 Aug 2025 18:03:37 +0200 Subject: [PATCH 01/17] chore: update benchmarks --- example-codspeed/fib_codspeed.go | 38 ++++++++++++++--------------- example/sleep_test.go | 42 ++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 19 deletions(-) diff --git a/example-codspeed/fib_codspeed.go b/example-codspeed/fib_codspeed.go index 680cdde1..b8a121de 100644 --- a/example-codspeed/fib_codspeed.go +++ b/example-codspeed/fib_codspeed.go @@ -5,31 +5,31 @@ import ( ) func BenchmarkFibonacci10(b *testing.B) { - // b.Run("fibonacci(40)", func(b *testing.B) { - // for i := 0; i < b.N; i++ { - // fibonacci(10) - // } - // }) - // b.Run("fibonacci(20)", func(b *testing.B) { - // for i := 0; i < b.N; i++ { - // fibonacci(20) - // } - // }) - b.RunParallel(func(b *testing.PB) { - for b.Next() { - fibonacci(30) + b.Run("fibonacci(10)", func(b *testing.B) { + for i := 0; i < b.N; i++ { + fibonacci(10) + } + }) + b.Run("fibonacci(20)", func(b *testing.B) { + for i := 0; i < b.N; i++ { + fibonacci(20) } }) + // b.RunParallel(func(b *testing.PB) { + // for b.Next() { + // fibonacci(30) + // } + // }) } func BenchmarkFibonacci20(b *testing.B) { for b.Loop() { - fibonacci(30) + fibonacci(20) } } -// func BenchmarkFibonacci30(b *testing.B) { -// for i := 0; i < b.N; i++ { -// fibonacci(30) -// } -// } +func BenchmarkFibonacci30(b *testing.B) { + for i := 0; i < b.N; i++ { + fibonacci(30) + } +} diff --git a/example/sleep_test.go b/example/sleep_test.go index 85af0ee6..6e35fae2 100644 --- a/example/sleep_test.go +++ b/example/sleep_test.go @@ -18,38 +18,80 @@ func BenchmarkSleep100ns(b *testing.B) { } } +func BenchmarkSleep100ns_Loop(b *testing.B) { + for b.Loop() { + busyWait(100 * time.Nanosecond) + } +} + func BenchmarkSleep1us(b *testing.B) { for i := 0; i < b.N; i++ { busyWait(1 * time.Microsecond) } } +func BenchmarkSleep1us_Loop(b *testing.B) { + for b.Loop() { + 
busyWait(1 * time.Microsecond) + } +} + func BenchmarkSleep10us(b *testing.B) { for i := 0; i < b.N; i++ { busyWait(10 * time.Microsecond) } } +func BenchmarkSleep10us_Loop(b *testing.B) { + for b.Loop() { + busyWait(10 * time.Microsecond) + } +} + func BenchmarkSleep100us(b *testing.B) { for i := 0; i < b.N; i++ { busyWait(100 * time.Microsecond) } } +func BenchmarkSleep100us_Loop(b *testing.B) { + for b.Loop() { + busyWait(100 * time.Microsecond) + } +} + func BenchmarkSleep1ms(b *testing.B) { for i := 0; i < b.N; i++ { busyWait(1 * time.Millisecond) } } +func BenchmarkSleep1ms_Loop(b *testing.B) { + for b.Loop() { + busyWait(1 * time.Millisecond) + } +} + func BenchmarkSleep10ms(b *testing.B) { for i := 0; i < b.N; i++ { busyWait(10 * time.Millisecond) } } +func BenchmarkSleep10ms_Loop(b *testing.B) { + for b.Loop() { + busyWait(10 * time.Millisecond) + } +} + func BenchmarkSleep50ms(b *testing.B) { for i := 0; i < b.N; i++ { busyWait(50 * time.Millisecond) } } + +func BenchmarkSleep50ms_Loop(b *testing.B) { + for b.Loop() { + busyWait(50 * time.Millisecond) + } +} From 67b1e2909e4c5ea7686151c615142181b0509370 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Wed, 27 Aug 2025 17:40:10 +0200 Subject: [PATCH 02/17] feat: add instrument-hooks artifacts --- testing/capi/instrument-hooks/dist/core.c | 7686 +++++++++++++++++ .../instrument-hooks/includes/callgrind.h | 124 + testing/capi/instrument-hooks/includes/core.h | 77 + .../capi/instrument-hooks/includes/valgrind.h | 7168 +++++++++++++++ testing/capi/instrument-hooks/includes/zig.h | 4209 +++++++++ testing/capi/vendor.sh | 22 + 6 files changed, 19286 insertions(+) create mode 100644 testing/capi/instrument-hooks/dist/core.c create mode 100644 testing/capi/instrument-hooks/includes/callgrind.h create mode 100644 testing/capi/instrument-hooks/includes/core.h create mode 100644 testing/capi/instrument-hooks/includes/valgrind.h create mode 100644 testing/capi/instrument-hooks/includes/zig.h create mode 100755 
testing/capi/vendor.sh diff --git a/testing/capi/instrument-hooks/dist/core.c b/testing/capi/instrument-hooks/dist/core.c new file mode 100644 index 00000000..7ab1d46b --- /dev/null +++ b/testing/capi/instrument-hooks/dist/core.c @@ -0,0 +1,7686 @@ +// This file is generated by scripts/release.py +// Do not edit it manually. +#ifdef _WIN32 +// Windows stub implementations - instrumentation not supported on Windows +#include +#include + +typedef struct InstrumentHooks { + char reserved; +} InstrumentHooks; + +InstrumentHooks* instrument_hooks_init() { + static InstrumentHooks instance = {}; + return &instance; +} + +void instrument_hooks_deinit(InstrumentHooks* hooks) {} + +bool instrument_hooks_is_instrumented(InstrumentHooks* hooks) { return false; } + +uint8_t instrument_hooks_start_benchmark(InstrumentHooks* hooks) { return 0; } + +uint8_t instrument_hooks_stop_benchmark(InstrumentHooks* hooks) { return 0; } + +uint8_t instrument_hooks_set_executed_benchmark(InstrumentHooks* hooks, + uint32_t pid, const char* uri) { + return 0; +} + +// Deprecated: use instrument_hooks_set_executed_benchmark instead +uint8_t instrument_hooks_executed_benchmark(InstrumentHooks* hooks, + uint32_t pid, const char* uri) { + return 0; +} + +uint8_t instrument_hooks_set_integration(InstrumentHooks* hooks, + const char* name, + const char* version) { + return 0; +} + +void instrument_hooks_set_feature(InstrumentHooks* hooks, uint64_t feature, + bool enabled) {} + +#else +#define ZIG_TARGET_MAX_INT_ALIGNMENT 16 +#include "zig.h" +struct anon__lazy_57 { + uint8_t const *ptr; + uintptr_t len; +}; +struct fifo_UnixPipe_Writer__600; +typedef struct anon__lazy_72 nav__1060_39; +struct mem_Allocator__565; +typedef struct anon__lazy_57 nav__1060_42; +struct mem_Allocator_VTable__568; +struct mem_Allocator__565 { + void *ptr; + struct mem_Allocator_VTable__568 const *vtable; +}; +struct fs_File__608; +struct fs_File__608 { + int32_t handle; +}; +struct fifo_UnixPipe_Writer__600 { + struct 
mem_Allocator__565 allocator; + struct fs_File__608 file; +}; +struct anon__lazy_72 { + struct fifo_UnixPipe_Writer__600 payload; + uint16_t error; +}; +struct fs_File_OpenFlags__1897; +struct fs_File_OpenFlags__1897 { + uint8_t mode; + uint8_t lock; + bool lock_nonblocking; + bool allow_ctty; +}; +typedef struct anon__lazy_86 nav__1060_56; +struct anon__lazy_86 { + struct fs_File__608 payload; + uint16_t error; +}; +struct fifo_UnixPipe_Reader__602; +typedef struct anon__lazy_89 nav__1059_39; +typedef struct anon__lazy_57 nav__1059_42; +struct fifo_UnixPipe_Reader__602 { + struct mem_Allocator__565 allocator; + struct fs_File__608 file; +}; +struct anon__lazy_89 { + struct fifo_UnixPipe_Reader__602 payload; + uint16_t error; +}; +typedef struct anon__lazy_86 nav__1059_56; +struct instruments_perf_PerfInstrument__559; +typedef struct anon__lazy_95 nav__739_39; +struct instruments_perf_PerfInstrument__559 { + struct mem_Allocator__565 allocator; + struct fifo_UnixPipe_Writer__600 writer; + struct fifo_UnixPipe_Reader__602 reader; +}; +struct anon__lazy_95 { + struct instruments_perf_PerfInstrument__559 payload; + uint16_t error; +}; +typedef struct anon__lazy_57 nav__739_59; +typedef struct anon__lazy_72 nav__739_61; +typedef struct anon__lazy_89 nav__739_66; +struct instruments_root_InstrumentHooks__547; +typedef struct anon__lazy_106 nav__715_39; +struct instruments_valgrind_ValgrindInstrument__554; +struct instruments_valgrind_ValgrindInstrument__554 { + struct mem_Allocator__565 allocator; +}; +struct instruments_root_InstrumentHooks__547 { + union { + struct instruments_valgrind_ValgrindInstrument__554 valgrind; + struct instruments_perf_PerfInstrument__559 perf; + } payload; + uint8_t tag; +}; +struct anon__lazy_106 { + struct instruments_root_InstrumentHooks__547 payload; + uint16_t error; +}; +typedef struct anon__lazy_95 nav__715_60; +typedef struct anon__lazy_116 nav__235_46; +struct anon__lazy_116 { + struct instruments_root_InstrumentHooks__547 *payload; 
+ uint16_t error; +}; +typedef struct anon__lazy_120 nav__235_51; +struct mem_Allocator_VTable__568 { + uint8_t *(*alloc)(void *, uintptr_t, uint8_t, uintptr_t); + bool (*resize)(void *, struct anon__lazy_120, uint8_t, uintptr_t, uintptr_t); + uint8_t *(*remap)(void *, struct anon__lazy_120, uint8_t, uintptr_t, uintptr_t); + void (*free)(void *, struct anon__lazy_120, uint8_t, uintptr_t); +}; +typedef struct anon__lazy_106 nav__235_71; +typedef struct anon__lazy_116 nav__3561_40; +typedef struct anon__lazy_131 nav__3561_51; +struct anon__lazy_131 { + uint8_t *payload; + uint16_t error; +}; +typedef struct anon__lazy_57 nav__1126_39; +struct fs_Dir__1899; +struct fs_Dir__1899 { + int32_t fd; +}; +typedef struct anon__lazy_86 nav__1123_39; +typedef struct anon__lazy_57 nav__1123_41; +struct shared_Command__2009; +struct shared_Command__struct_2012__2012; +typedef struct anon__lazy_57 nav__1064_44; +struct shared_Command__struct_2012__2012 { + struct anon__lazy_57 uri; + uint32_t pid; +}; +struct shared_Command__struct_2013__2013; +struct shared_Command__struct_2013__2013 { + struct anon__lazy_57 name; + struct anon__lazy_57 version; +}; +struct shared_Command__2009 { + union { + struct shared_Command__struct_2012__2012 ExecutedBenchmark; + struct shared_Command__struct_2013__2013 SetIntegration; + } payload; + uint8_t tag; +}; +struct array_list_ArrayListAligned_28u8_2cnull_29__2040; +typedef struct anon__lazy_120 nav__1064_55; +struct anon__lazy_120 { + uint8_t *ptr; + uintptr_t len; +}; +struct array_list_ArrayListAligned_28u8_2cnull_29__2040 { + struct anon__lazy_120 items; + uintptr_t capacity; + struct mem_Allocator__565 allocator; +}; +struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071; +struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 { + struct 
array_list_ArrayListAligned_28u8_2cnull_29__2040 *context; +}; +typedef struct anon__lazy_165 nav__4242_38; +struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246; +struct anon__lazy_165 { + uint32_t payload; + uint16_t error; +}; +struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223; +struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *context; +}; +struct io_Reader__2372; +typedef struct anon__lazy_177 nav__4242_50; +typedef struct anon__lazy_120 nav__4242_52; +struct io_Reader__2372 { + void const *context; + struct anon__lazy_177 (*readFn)(void const *, struct anon__lazy_120); +}; +struct anon__lazy_177 { + uintptr_t payload; + uint16_t error; +}; +typedef struct anon__lazy_187 nav__4242_64; +struct anon__lazy_187 { + uint16_t error; + uint8_t payload[4]; +}; +typedef struct anon__lazy_165 nav__4241_38; +typedef struct anon__lazy_120 nav__4284_39; +typedef struct anon__lazy_191 nav__4284_40; +struct anon__lazy_191 { + struct anon__lazy_120 payload; + uint16_t error; +}; +typedef struct anon__lazy_177 nav__4284_59; +typedef struct anon__lazy_195 nav__4284_69; +struct anon__lazy_195 { + uint64_t payload; + uint16_t error; +}; +typedef struct anon__lazy_198 nav__4284_72; +struct anon__lazy_198 { + uint16_t error; + uint8_t payload[8]; +}; +typedef struct anon__lazy_57 nav__4283_39; +typedef struct anon__lazy_201 nav__4283_40; +struct anon__lazy_201 { + struct anon__lazy_57 payload; + uint16_t error; +}; +typedef struct anon__lazy_120 nav__4283_55; +typedef struct anon__lazy_191 nav__4283_57; +typedef struct anon__lazy_205 nav__4282_39; +typedef struct anon__lazy_57 nav__4282_44; +struct anon__lazy_205 { + struct shared_Command__struct_2012__2012 payload; + uint16_t error; +}; +typedef struct anon__lazy_165 
nav__4282_57; +typedef struct anon__lazy_201 nav__4282_60; +typedef struct anon__lazy_205 nav__4281_39; +typedef struct anon__lazy_57 nav__4281_44; +typedef struct anon__lazy_213 nav__4289_39; +typedef struct anon__lazy_57 nav__4289_44; +struct anon__lazy_213 { + struct shared_Command__struct_2013__2013 payload; + uint16_t error; +}; +typedef struct anon__lazy_201 nav__4289_57; +typedef struct anon__lazy_213 nav__4288_39; +typedef struct anon__lazy_57 nav__4288_44; +typedef struct anon__lazy_217 nav__4240_39; +typedef struct anon__lazy_57 nav__4240_45; +struct anon__lazy_217 { + struct shared_Command__2009 payload; + uint16_t error; +}; +typedef struct anon__lazy_165 nav__4240_61; +typedef struct anon__lazy_205 nav__4240_63; +typedef struct anon__lazy_213 nav__4240_65; +typedef struct anon__lazy_217 nav__4239_39; +typedef struct anon__lazy_57 nav__4239_45; +typedef struct anon__lazy_217 nav__1069_39; +typedef struct anon__lazy_57 nav__1069_45; +typedef struct anon__lazy_120 nav__1069_66; +typedef struct anon__lazy_177 nav__1069_69; +typedef struct anon__lazy_191 nav__1069_73; +struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 { + struct anon__lazy_120 buffer; + uintptr_t pos; +}; +typedef struct anon__lazy_217 nav__1070_39; +typedef struct anon__lazy_226 nav__1070_42; +typedef struct anon__lazy_57 nav__1070_46; +struct anon__lazy_226 { + uint64_t payload; + bool is_null; +}; +typedef struct anon__lazy_226 nav__1071_40; +typedef struct anon__lazy_57 nav__1071_48; +typedef struct anon__lazy_217 nav__1071_55; +typedef struct anon__lazy_57 nav__741_44; +typedef struct anon__lazy_226 nav__741_67; +typedef struct anon__lazy_57 nav__742_45; +typedef struct anon__lazy_131 nav__3810_39; +typedef struct anon__lazy_120 nav__3810_52; +typedef struct anon__lazy_57 nav__3845_39; +typedef struct anon__lazy_57 nav__3766_40; +typedef struct anon__lazy_249 nav__3766_49; +struct anon__lazy_249 { + uint16_t error; + uint8_t payload[4096]; +}; +typedef struct 
anon__lazy_86 nav__3702_39; +typedef struct anon__lazy_57 nav__3702_42; +typedef struct anon__lazy_249 nav__3702_53; +typedef struct anon__lazy_120 nav__3958_42; +typedef struct anon__lazy_57 nav__4131_46; +typedef struct anon__lazy_120 nav__3960_41; +typedef struct anon__lazy_57 nav__1217_40; +typedef struct anon__lazy_177 nav__1217_47; +struct os_linux_timespec__struct_2818__2818; +struct os_linux_timespec__struct_2818__2818 { + intptr_t sec; + intptr_t nsec; +}; +typedef struct anon__lazy_272 nav__4145_41; +struct anon__lazy_272 { + struct os_linux_timespec__struct_2818__2818 payload; + uint16_t error; +}; +typedef struct anon__lazy_177 nav__1207_38; +typedef struct anon__lazy_120 nav__1207_41; +typedef struct anon__lazy_120 nav__4173_39; +typedef struct anon__lazy_191 nav__4173_40; +typedef struct anon__lazy_131 nav__4173_51; +typedef struct anon__lazy_120 nav__4205_40; +typedef struct anon__lazy_177 nav__4238_38; +typedef struct anon__lazy_120 nav__4238_41; +typedef struct anon__lazy_187 nav__4280_39; +typedef struct anon__lazy_177 nav__4280_44; +typedef struct anon__lazy_120 nav__4280_46; +typedef struct anon__lazy_198 nav__4285_39; +typedef struct anon__lazy_177 nav__4285_44; +typedef struct anon__lazy_120 nav__4285_46; +typedef struct anon__lazy_177 nav__4245_38; +typedef struct anon__lazy_120 nav__4245_41; +typedef struct anon__lazy_57 nav__4286_40; +typedef struct anon__lazy_294 nav__4286_42; +struct anon__lazy_294 { + uintptr_t payload; + bool is_null; +}; +typedef struct anon__lazy_120 nav__4290_40; +struct cimport_struct_timespec__2992; +struct cimport_struct_timespec__2992 { + long tv_sec; + long tv_nsec; +}; +typedef struct anon__lazy_304 nav__4295_42; +struct anon__lazy_304 { + long payload; + bool is_null; +}; +typedef struct anon__lazy_57 nav__3908_43; +typedef struct anon__lazy_57 nav__3851_39; +typedef struct anon__lazy_249 nav__1701_39; +typedef struct anon__lazy_57 nav__1701_41; +typedef struct anon__lazy_120 nav__1701_47; +typedef struct 
anon__lazy_86 nav__3703_39; +typedef struct anon__lazy_317 nav__3703_50; +struct anon__lazy_317 { + int32_t payload; + uint16_t error; +}; +typedef struct anon__lazy_57 nav__4309_46; +typedef struct anon__lazy_120 nav__4006_39; +typedef struct anon__lazy_177 nav__1216_38; +typedef struct anon__lazy_57 nav__1216_41; +typedef struct anon__lazy_272 nav__1617_39; +typedef struct anon__lazy_177 nav__1206_38; +typedef struct anon__lazy_120 nav__1206_41; +typedef struct anon__lazy_131 nav__4310_39; +typedef struct anon__lazy_177 nav__4310_49; +typedef struct anon__lazy_177 nav__4197_38; +typedef struct anon__lazy_120 nav__4197_42; +typedef struct anon__lazy_120 nav__4247_40; +typedef struct anon__lazy_177 nav__4247_43; +typedef struct anon__lazy_177 nav__4246_38; +typedef struct anon__lazy_120 nav__4246_41; +typedef struct anon__lazy_57 nav__4312_39; +typedef struct anon__lazy_294 nav__4312_40; +typedef struct anon__lazy_120 nav__4339_39; +typedef struct anon__lazy_304 nav__5194_38; +typedef struct anon__lazy_57 nav__5974_40; +typedef struct anon__lazy_120 nav__5974_51; +typedef struct anon__lazy_317 nav__1448_38; +typedef struct anon__lazy_57 nav__6101_45; +typedef struct anon__lazy_57 nav__6103_45; +typedef struct anon__lazy_177 nav__1436_38; +typedef struct anon__lazy_57 nav__1436_40; +typedef struct anon__lazy_177 nav__1428_38; +typedef struct anon__lazy_120 nav__1428_40; +typedef struct anon__lazy_177 nav__6107_38; +typedef struct anon__lazy_364 nav__6107_42; +struct anon__lazy_364 { + uintptr_t f0; + uint8_t f1; +}; +typedef struct anon__lazy_131 nav__6108_39; +typedef struct anon__lazy_120 nav__6108_52; +typedef struct anon__lazy_177 nav__4244_38; +typedef struct anon__lazy_120 nav__4244_41; +typedef struct anon__lazy_57 nav__6109_39; +struct io_Writer__3718; +typedef struct anon__lazy_177 nav__6113_48; +typedef struct anon__lazy_57 nav__6113_50; +struct io_Writer__3718 { + void const *context; + struct anon__lazy_177 (*writeFn)(void const *, struct anon__lazy_57); 
+}; +typedef struct anon__lazy_57 nav__6114_45; +typedef struct anon__lazy_57 nav__6115_45; +typedef struct anon__lazy_177 nav__4130_38; +typedef struct anon__lazy_57 nav__4130_41; +typedef struct anon__lazy_57 nav__6127_40; +typedef struct anon__lazy_177 nav__6127_43; +typedef struct anon__lazy_57 nav__6136_40; +typedef struct anon__lazy_177 nav__3984_38; +typedef struct anon__lazy_57 nav__3984_42; +typedef struct anon__lazy_177 nav__6126_38; +typedef struct anon__lazy_57 nav__6126_41; +typedef struct anon__lazy_57 nav__6137_40; +typedef struct anon__lazy_177 nav__6137_51; +typedef struct anon__lazy_57 nav__3978_41; +typedef struct anon__lazy_120 nav__3997_43; +typedef struct anon__lazy_177 nav__3997_52; +typedef struct anon__lazy_57 nav__3979_41; +typedef struct anon__lazy_120 nav__3979_47; +typedef struct anon__lazy_177 nav__3922_38; +typedef struct anon__lazy_364 nav__3922_42; +typedef struct anon__lazy_120 nav__3995_43; +typedef struct anon__lazy_120 nav__3996_43; +typedef struct anon__lazy_191 nav__3996_55; +typedef struct anon__lazy_120 nav__6223_39; +typedef struct anon__lazy_177 nav__6223_50; +typedef struct anon__lazy_120 nav__6224_39; +typedef struct anon__lazy_191 nav__6224_40; +typedef struct anon__lazy_131 nav__6224_51; +typedef struct anon__lazy_120 nav__6225_39; +typedef struct anon__lazy_120 nav__236_63; +typedef struct anon__lazy_120 nav__6226_52; +typedef struct anon__lazy_57 nav__743_58; +typedef struct anon__lazy_226 nav__743_66; +typedef struct anon__lazy_57 nav__744_58; +typedef struct anon__lazy_226 nav__744_66; +typedef struct anon__lazy_57 nav__745_56; +typedef struct anon__lazy_226 nav__745_66; +typedef struct anon__lazy_57 nav__6231_39; +typedef struct anon__lazy_426 nav__6233_45; +struct anon__lazy_426 { + bool array[32]; +}; +typedef struct anon__lazy_428 nav__6233_47; +struct anon__lazy_428 { + bool is_null; + uint8_t payload; +}; +typedef struct anon__lazy_428 nav__6599_38; +typedef struct anon__lazy_426 nav__6599_40; +typedef struct 
anon__lazy_57 nav__746_56; +typedef struct anon__lazy_226 nav__746_66; +typedef struct anon__lazy_441 nav__242_62; +struct anon__lazy_441 { + uint8_t const *f0; + uint8_t const *f1; +}; +typedef struct anon__lazy_120 nav__242_65; +typedef struct anon__lazy_191 nav__242_67; +typedef struct anon__lazy_120 nav__6607_39; +typedef struct anon__lazy_191 nav__6607_40; +typedef struct anon__lazy_441 nav__6607_43; +typedef struct anon__lazy_120 nav__6608_40; +typedef struct anon__lazy_120 nav__6609_39; +typedef struct anon__lazy_191 nav__6609_40; +typedef struct anon__lazy_441 nav__6609_43; +typedef struct anon__lazy_294 nav__6609_54; +typedef struct anon__lazy_120 nav__6610_39; +typedef struct anon__lazy_441 nav__6652_40; +typedef struct anon__lazy_177 nav__6652_43; +typedef struct anon__lazy_57 nav__6652_44; +struct fmt_FormatOptions__4756; +typedef struct anon__lazy_294 nav__6652_55; +struct fmt_FormatOptions__4756 { + struct anon__lazy_294 precision; + struct anon__lazy_294 width; + uint32_t fill; + uint8_t alignment; +}; +typedef struct anon__lazy_441 nav__6611_39; +struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403; +struct io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29__4395; +struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 { + uint64_t bytes_written; +}; +struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423; +struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 { + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 
*context; +}; +typedef struct anon__lazy_177 nav__6611_53; +typedef struct anon__lazy_57 nav__6611_54; +typedef struct anon__lazy_294 nav__6612_38; +typedef struct anon__lazy_120 nav__6613_39; +typedef struct anon__lazy_191 nav__6613_40; +typedef struct anon__lazy_441 nav__6613_42; +struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814; +struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *context; +}; +typedef struct anon__lazy_177 nav__6613_57; +typedef struct anon__lazy_57 nav__6613_58; +typedef struct anon__lazy_177 nav__6651_38; +typedef struct anon__lazy_57 nav__6651_41; +typedef struct anon__lazy_294 nav__6820_42; +typedef struct anon__lazy_177 nav__6820_46; +typedef struct anon__lazy_57 nav__6820_47; +typedef struct anon__lazy_177 nav__6833_38; +typedef struct anon__lazy_57 nav__6833_41; +typedef struct anon__lazy_120 nav__4203_39; +typedef struct anon__lazy_177 nav__6636_38; +typedef struct anon__lazy_57 nav__6636_42; +typedef struct anon__lazy_484 nav__6664_38; +struct anon__lazy_484 { + uint16_t error; + uint8_t payload; +}; +typedef struct anon__lazy_177 nav__6680_38; +typedef struct anon__lazy_57 nav__6680_40; +typedef struct anon__lazy_484 nav__6680_48; +typedef struct anon__lazy_165 nav__6680_50; +typedef struct anon__lazy_57 nav__6834_39; +typedef struct anon__lazy_294 nav__6834_44; +typedef struct anon__lazy_177 nav__6834_48; +typedef struct anon__lazy_120 nav__6834_57; +typedef struct anon__lazy_484 nav__6834_60; +typedef struct anon__lazy_177 nav__4198_38; +typedef struct anon__lazy_57 nav__4198_42; +typedef struct anon__lazy_120 nav__4198_50; +typedef struct anon__lazy_177 nav__4108_38; +typedef struct anon__lazy_57 nav__4108_40; +typedef struct anon__lazy_165 nav__6670_38; +typedef 
struct anon__lazy_57 nav__6670_40; +typedef struct anon__lazy_499 nav__6670_48; +struct anon__lazy_499 { + uint8_t array[2]; +}; +typedef struct anon__lazy_501 nav__6670_52; +struct anon__lazy_501 { + uint8_t array[3]; +}; +typedef struct anon__lazy_503 nav__6670_56; +struct anon__lazy_503 { + uint8_t array[4]; +}; +typedef struct anon__lazy_484 nav__6663_38; +typedef struct anon__lazy_484 nav__6835_38; +typedef struct anon__lazy_120 nav__6835_40; +typedef struct anon__lazy_484 nav__6665_38; +typedef struct anon__lazy_120 nav__6665_40; +typedef struct anon__lazy_57 nav__6131_40; +typedef struct anon__lazy_177 nav__6131_43; +typedef struct anon__lazy_165 nav__6672_38; +typedef struct anon__lazy_499 nav__6672_40; +typedef struct anon__lazy_165 nav__6674_38; +typedef struct anon__lazy_501 nav__6674_40; +typedef struct anon__lazy_165 nav__6678_38; +typedef struct anon__lazy_503 nav__6678_40; +typedef struct anon__lazy_165 nav__6676_38; +typedef struct anon__lazy_501 nav__6676_40; +struct Target_Os__625; +union Target_Os_VersionRange__681; +struct SemanticVersion_Range__686; +struct SemanticVersion__684; +typedef struct anon__lazy_57 nav__255_43; +struct SemanticVersion__684 { + uintptr_t major; + uintptr_t minor; + uintptr_t patch; + struct anon__lazy_57 pre; + struct anon__lazy_57 build; +}; +struct SemanticVersion_Range__686 { + struct SemanticVersion__684 zig_e_min; + struct SemanticVersion__684 zig_e_max; +}; +struct Target_Os_HurdVersionRange__688; +struct Target_Os_HurdVersionRange__688 { + struct SemanticVersion_Range__686 range; + struct SemanticVersion__684 glibc; +}; +struct Target_Os_LinuxVersionRange__690; +struct Target_Os_LinuxVersionRange__690 { + struct SemanticVersion_Range__686 range; + struct SemanticVersion__684 glibc; + uint32_t android; +}; +struct Target_Os_WindowsVersion_Range__746; +struct Target_Os_WindowsVersion_Range__746 { + uint32_t zig_e_min; + uint32_t zig_e_max; +}; +union Target_Os_VersionRange__681 { + struct 
SemanticVersion_Range__686 semver; + struct Target_Os_HurdVersionRange__688 hurd; + struct Target_Os_LinuxVersionRange__690 linux; + struct Target_Os_WindowsVersion_Range__746 windows; +}; +struct Target_Os__625 { + union Target_Os_VersionRange__681 version_range; + uint8_t tag; +}; +struct Target_DynamicLinker__801; +struct Target_DynamicLinker__801 { + uint8_t buffer[255]; + uint8_t len; +}; +typedef struct anon__lazy_120 nav__3558_40; +typedef struct anon__lazy_120 nav__3559_40; +typedef struct anon__lazy_120 nav__3560_40; +typedef struct anon__lazy_120 nav__3522_46; +typedef struct anon__lazy_120 nav__233_46; +struct Target_Cpu_Feature_Set__817; +struct Target_Cpu_Feature_Set__817 { + uintptr_t ints[5]; +}; +struct Target_Cpu__786; +struct Target_Cpu_Model__812; +struct Target_Cpu__786 { + struct Target_Cpu_Model__812 const *model; + struct Target_Cpu_Feature_Set__817 features; + uint8_t arch; +}; +typedef struct anon__lazy_57 nav__254_46; +struct Target_Cpu_Model__812 { + struct anon__lazy_57 name; + struct anon__lazy_57 llvm_name; + struct Target_Cpu_Feature_Set__817 features; +}; +struct Target__623; +typedef struct anon__lazy_57 nav__256_51; +struct Target__623 { + struct Target_Cpu__786 cpu; + struct Target_Os__625 os; + uint8_t abi; + uint8_t ofmt; + struct Target_DynamicLinker__801 dynamic_linker; +}; +struct builtin_CallingConvention__266; +struct builtin_CallingConvention_CommonOptions__268; +typedef struct anon__lazy_226 nav__455_40; +struct builtin_CallingConvention_CommonOptions__268 { + struct anon__lazy_226 incoming_stack_alignment; +}; +struct builtin_CallingConvention_X86RegparmOptions__270; +struct builtin_CallingConvention_X86RegparmOptions__270 { + struct anon__lazy_226 incoming_stack_alignment; + uint8_t register_params; +}; +struct builtin_CallingConvention_ArmInterruptOptions__272; +struct builtin_CallingConvention_ArmInterruptOptions__272 { + struct anon__lazy_226 incoming_stack_alignment; + uint8_t type; +}; +struct 
builtin_CallingConvention_MipsInterruptOptions__274; +struct builtin_CallingConvention_MipsInterruptOptions__274 { + struct anon__lazy_226 incoming_stack_alignment; + uint8_t mode; +}; +struct builtin_CallingConvention_RiscvInterruptOptions__276; +struct builtin_CallingConvention_RiscvInterruptOptions__276 { + struct anon__lazy_226 incoming_stack_alignment; + uint8_t mode; +}; +struct builtin_CallingConvention__266 { + union { + struct builtin_CallingConvention_CommonOptions__268 x86_64_sysv; + struct builtin_CallingConvention_CommonOptions__268 x86_64_win; + struct builtin_CallingConvention_CommonOptions__268 x86_64_regcall_v3_sysv; + struct builtin_CallingConvention_CommonOptions__268 x86_64_regcall_v4_win; + struct builtin_CallingConvention_CommonOptions__268 x86_64_vectorcall; + struct builtin_CallingConvention_CommonOptions__268 x86_64_interrupt; + struct builtin_CallingConvention_X86RegparmOptions__270 x86_sysv; + struct builtin_CallingConvention_X86RegparmOptions__270 x86_win; + struct builtin_CallingConvention_X86RegparmOptions__270 x86_stdcall; + struct builtin_CallingConvention_CommonOptions__268 x86_fastcall; + struct builtin_CallingConvention_CommonOptions__268 x86_thiscall; + struct builtin_CallingConvention_CommonOptions__268 x86_thiscall_mingw; + struct builtin_CallingConvention_CommonOptions__268 x86_regcall_v3; + struct builtin_CallingConvention_CommonOptions__268 x86_regcall_v4_win; + struct builtin_CallingConvention_CommonOptions__268 x86_vectorcall; + struct builtin_CallingConvention_CommonOptions__268 x86_interrupt; + struct builtin_CallingConvention_CommonOptions__268 aarch64_aapcs; + struct builtin_CallingConvention_CommonOptions__268 aarch64_aapcs_darwin; + struct builtin_CallingConvention_CommonOptions__268 aarch64_aapcs_win; + struct builtin_CallingConvention_CommonOptions__268 aarch64_vfabi; + struct builtin_CallingConvention_CommonOptions__268 aarch64_vfabi_sve; + struct builtin_CallingConvention_CommonOptions__268 arm_aapcs; + struct 
builtin_CallingConvention_CommonOptions__268 arm_aapcs_vfp; + struct builtin_CallingConvention_ArmInterruptOptions__272 arm_interrupt; + struct builtin_CallingConvention_CommonOptions__268 mips64_n64; + struct builtin_CallingConvention_CommonOptions__268 mips64_n32; + struct builtin_CallingConvention_MipsInterruptOptions__274 mips64_interrupt; + struct builtin_CallingConvention_CommonOptions__268 mips_o32; + struct builtin_CallingConvention_MipsInterruptOptions__274 mips_interrupt; + struct builtin_CallingConvention_CommonOptions__268 riscv64_lp64; + struct builtin_CallingConvention_CommonOptions__268 riscv64_lp64_v; + struct builtin_CallingConvention_RiscvInterruptOptions__276 riscv64_interrupt; + struct builtin_CallingConvention_CommonOptions__268 riscv32_ilp32; + struct builtin_CallingConvention_CommonOptions__268 riscv32_ilp32_v; + struct builtin_CallingConvention_RiscvInterruptOptions__276 riscv32_interrupt; + struct builtin_CallingConvention_CommonOptions__268 sparc64_sysv; + struct builtin_CallingConvention_CommonOptions__268 sparc_sysv; + struct builtin_CallingConvention_CommonOptions__268 powerpc64_elf; + struct builtin_CallingConvention_CommonOptions__268 powerpc64_elf_altivec; + struct builtin_CallingConvention_CommonOptions__268 powerpc64_elf_v2; + struct builtin_CallingConvention_CommonOptions__268 powerpc_sysv; + struct builtin_CallingConvention_CommonOptions__268 powerpc_sysv_altivec; + struct builtin_CallingConvention_CommonOptions__268 powerpc_aix; + struct builtin_CallingConvention_CommonOptions__268 powerpc_aix_altivec; + struct builtin_CallingConvention_CommonOptions__268 wasm_mvp; + struct builtin_CallingConvention_CommonOptions__268 arc_sysv; + struct builtin_CallingConvention_CommonOptions__268 bpf_std; + struct builtin_CallingConvention_CommonOptions__268 csky_sysv; + struct builtin_CallingConvention_CommonOptions__268 csky_interrupt; + struct builtin_CallingConvention_CommonOptions__268 hexagon_sysv; + struct 
builtin_CallingConvention_CommonOptions__268 hexagon_sysv_hvx; + struct builtin_CallingConvention_CommonOptions__268 lanai_sysv; + struct builtin_CallingConvention_CommonOptions__268 loongarch64_lp64; + struct builtin_CallingConvention_CommonOptions__268 loongarch32_ilp32; + struct builtin_CallingConvention_CommonOptions__268 m68k_sysv; + struct builtin_CallingConvention_CommonOptions__268 m68k_gnu; + struct builtin_CallingConvention_CommonOptions__268 m68k_rtd; + struct builtin_CallingConvention_CommonOptions__268 m68k_interrupt; + struct builtin_CallingConvention_CommonOptions__268 msp430_eabi; + struct builtin_CallingConvention_CommonOptions__268 propeller_sysv; + struct builtin_CallingConvention_CommonOptions__268 s390x_sysv; + struct builtin_CallingConvention_CommonOptions__268 s390x_sysv_vx; + struct builtin_CallingConvention_CommonOptions__268 ve_sysv; + struct builtin_CallingConvention_CommonOptions__268 xcore_xs1; + struct builtin_CallingConvention_CommonOptions__268 xcore_xs2; + struct builtin_CallingConvention_CommonOptions__268 xtensa_call0; + struct builtin_CallingConvention_CommonOptions__268 xtensa_windowed; + struct builtin_CallingConvention_CommonOptions__268 amdgcn_device; + struct builtin_CallingConvention_CommonOptions__268 amdgcn_cs; + } payload; + uint8_t tag; +}; +struct std_Options__4093; +typedef struct anon__lazy_294 nav__97_39; +struct std_Options__4093 { + struct anon__lazy_294 page_size_min; + struct anon__lazy_294 page_size_max; + uintptr_t fmt_max_depth; + bool enable_segfault_handler; + uint8_t log_level; + bool crypto_always_getrandom; + bool crypto_fork_safety; + bool keep_sigpipe; + bool http_disable_tls; + bool http_enable_ssl_key_log_file; + uint8_t side_channels_mitigations; +}; +typedef struct anon__lazy_294 nav__3538_38; +typedef struct anon__lazy_120 nav__3549_43; +typedef struct anon__lazy_57 nav__3664_40; +static uint8_t const __anon_1890[21]; +static uint8_t const __anon_1950[21]; +static uint8_t const __anon_2869[89]; 
+static uint8_t const __anon_4497[10]; +static uint8_t const __anon_4778[1]; +static uint8_t const __anon_4798[1]; +static uint8_t const __anon_4969[3]; +static uint8_t const __anon_4843[4]; +static uint8_t const __anon_5093[10]; +#define c_instrument_hooks_set_feature__234 instrument_hooks_set_feature +zig_extern void instrument_hooks_set_feature(uint64_t, bool); +static void features_set_feature__315(uint64_t, bool); +static void bit_set_IntegerBitSet_2864_29_set__354(uint64_t *, uintptr_t); +static void bit_set_IntegerBitSet_2864_29_unset__356(uint64_t *, uintptr_t); +static void debug_assert__180(bool); +static uint64_t bit_set_IntegerBitSet_2864_29_maskBit__375(uintptr_t); +static nav__1060_39 fifo_UnixPipe_openWrite__1060(struct mem_Allocator__565, nav__1060_42); +static nav__1059_39 fifo_UnixPipe_openRead__1059(struct mem_Allocator__565, nav__1059_42); +static nav__739_39 instruments_perf_PerfInstrument_init__739(struct mem_Allocator__565); +static nav__715_39 instruments_root_InstrumentHooks_init__715(struct mem_Allocator__565); +#define c_instrument_hooks_init__235 instrument_hooks_init +zig_extern struct instruments_root_InstrumentHooks__547 *instrument_hooks_init(void); +static nav__3561_40 mem_Allocator_create__anon_858__3561(struct mem_Allocator__565); +static struct instruments_valgrind_ValgrindInstrument__554 instruments_valgrind_ValgrindInstrument_init__727(struct mem_Allocator__565); +static uint16_t fs_accessAbsolute__1126(nav__1126_39, struct fs_File_OpenFlags__1897); +static nav__1123_39 fs_openFileAbsolute__1123(nav__1123_41, struct fs_File_OpenFlags__1897); +static struct fifo_UnixPipe_Writer__600 fifo_UnixPipe_Writer_init__1061(struct fs_File__608, struct mem_Allocator__565); +static struct fifo_UnixPipe_Reader__602 fifo_UnixPipe_Reader_init__1066(struct fs_File__608, struct mem_Allocator__565); +static uint16_t fifo_UnixPipe_Writer_sendCmd__1064(struct fifo_UnixPipe_Writer__600 *, struct shared_Command__2009); +static nav__4242_38 
bincode_deserializeInt__anon_2357__4242(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246); +static nav__4241_38 bincode_deserializeAlloc__anon_2346__4241(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4284_40 bincode_deserializePointerAlloc__anon_2483__4284(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4283_40 bincode_deserializeAlloc__anon_2470__4283(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4282_39 bincode_deserializeStructAlloc__anon_2445__4282(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4281_39 bincode_deserializeAlloc__anon_2416__4281(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static uint16_t bincode_deserializeAlloc__anon_2535__4287(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4289_39 bincode_deserializeStructAlloc__anon_2597__4289(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4288_39 bincode_deserializeAlloc__anon_2578__4288(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, 
struct mem_Allocator__565); +static nav__4240_39 bincode_deserializeUnionAlloc__anon_2341__4240(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__4239_39 bincode_deserializeAlloc__anon_2254__4239(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246, struct mem_Allocator__565); +static nav__1069_39 fifo_UnixPipe_Reader_recvCmd__1069(struct fifo_UnixPipe_Reader__602 *); +static nav__1070_39 fifo_UnixPipe_Reader_waitForResponse__1070(struct fifo_UnixPipe_Reader__602 *, nav__1070_42); +static uint16_t fifo_UnixPipe_Reader_waitForAck__1071(struct fifo_UnixPipe_Reader__602 *, nav__1071_40); +static uint16_t instruments_perf_PerfInstrument_send_cmd__741(struct instruments_perf_PerfInstrument__559 *, struct shared_Command__2009); +static bool instruments_perf_PerfInstrument_is_instrumented__742(struct instruments_perf_PerfInstrument__559 *); +static nav__3810_39 mem_Allocator_allocBytesWithAlignment__anon_1979__3810(struct mem_Allocator__565, uintptr_t, uintptr_t); +static bool fs_path_isAbsolute__3845(nav__3845_39); +static struct fs_Dir__1899 fs_cwd__1118(void); +static uint16_t fs_Dir_access__3766(struct fs_Dir__1899, nav__3766_40, struct fs_File_OpenFlags__1897); +static nav__3702_39 fs_Dir_openFile__3702(struct fs_Dir__1899, nav__3702_42, struct fs_File_OpenFlags__1897); +static struct array_list_ArrayListAligned_28u8_2cnull_29__2040 array_list_ArrayListAligned_28u8_2cnull_29_init__3958(struct mem_Allocator__565); +static struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 array_list_ArrayListAligned_28u8_2cnull_29_writer__3983(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *); +static uint16_t bincode_serialize__anon_2081__4131(struct 
io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, struct shared_Command__2009); +static void array_list_ArrayListAligned_28u8_2cnull_29_deinit__3960(struct array_list_ArrayListAligned_28u8_2cnull_29__2040); +static uint8_t const (*mem_asBytes__anon_2122__4132(uint32_t const *))[4]; +static uint16_t fs_File_writeAll__1217(struct fs_File__608, nav__1217_40); +static zig_i128 time_nanoTimestamp__4145(void); +static nav__1207_38 fs_File_readAll__1207(struct fs_File__608, nav__1207_41); +static nav__4173_40 mem_Allocator_alloc__anon_2204__4173(struct mem_Allocator__565, uintptr_t); +static struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 io_fixed_buffer_stream_fixedBufferStream__anon_2226__4205(nav__4205_40); +static struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_reader__4194(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *); +static nav__4238_38 io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29_typeEras__4238(void const *, nav__4238_41); +static nav__4280_39 io_Reader_readBytesNoEof__anon_2403__4280(struct io_Reader__2372); +static nav__4285_39 io_Reader_readBytesNoEof__anon_2496__4285(struct io_Reader__2372); +static nav__4245_38 io_Reader_readAll__4245(struct io_Reader__2372, nav__4245_41); +static zig_cold zig_noreturn void bincode_invalidProtocol__anon_2520__4286(void); +static void mem_Allocator_free__anon_2631__4290(struct mem_Allocator__565, nav__4290_40); +static void utils_sleep__4295(uint64_t); +static void shared_Command_deinit__3908(struct shared_Command__2009, struct mem_Allocator__565); +static uint8_t mem_Alignment_fromByteUnits__1032(uintptr_t); +static bool fs_path_isAbsolutePosix__3851(nav__3851_39); +static 
nav__1701_39 posix_toPosixPath__1701(nav__1701_41); +static uint16_t fs_Dir_accessZ__3767(struct fs_Dir__1899, uint8_t const *, struct fs_File_OpenFlags__1897); +static nav__3703_39 fs_Dir_openFileZ__3703(struct fs_Dir__1899, uint8_t const *, struct fs_File_OpenFlags__1897); +static uint16_t bincode_serializeUnion__anon_2782__4309(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, struct shared_Command__2009); +static nav__4006_39 array_list_ArrayListAligned_28u8_2cnull_29_allocatedSlice__4006(struct array_list_ArrayListAligned_28u8_2cnull_29__2040); +static nav__1216_38 fs_File_write__1216(struct fs_File__608, nav__1216_41); +static nav__1617_39 posix_clock_gettime__1617(uint32_t); +static nav__1206_38 fs_File_read__1206(struct fs_File__608, nav__1206_41); +static nav__4310_39 mem_Allocator_allocWithSizeAndAlignment__anon_2849__4310(struct mem_Allocator__565, uintptr_t, uintptr_t); +static nav__4197_38 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_read__4197(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *, nav__4197_42); +static uint16_t io_Reader_readNoEof__4247(struct io_Reader__2372, nav__4247_40); +static nav__4246_38 io_Reader_readAtLeast__4246(struct io_Reader__2372, nav__4246_41, uintptr_t); +static zig_cold zig_noreturn void debug_no_panic_call__4312(nav__4312_39, nav__4312_40); +static nav__4339_39 mem_sliceAsBytes__anon_2966__4339(nav__4339_39); +static nav__5194_38 math_cast__anon_3005__5194(uint64_t); +static void mem_Allocator_free__anon_3072__5974(struct mem_Allocator__565, nav__5974_40); +static bool math_isPowerOfTwo__anon_3074__5975(uintptr_t); +static uint16_t posix_faccessatZ__1592(int32_t, uint8_t const *, uint32_t, uint32_t); +static nav__1448_38 posix_openatZ__1448(int32_t, uint8_t const *, uint32_t, uintptr_t); +static uint16_t posix_flock__1609(int32_t, int32_t); +static void posix_close__1406(int32_t); +static 
uint16_t bincode_serialize__anon_3213__6100(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, uint32_t); +static uint16_t bincode_serialize__anon_3215__6101(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, struct shared_Command__struct_2012__2012); +static uint16_t bincode_serialize__anon_3217__6102(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071); +static uint16_t bincode_serialize__anon_3219__6103(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, struct shared_Command__struct_2013__2013); +static nav__1436_38 posix_write__1436(int32_t, nav__1436_40); +static uint16_t posix_errno__anon_3383__6106(int); +static uint16_t posix_unexpectedErrno__1700(uint16_t); +static nav__1428_38 posix_read__1428(int32_t, nav__1428_40); +static nav__6107_38 math_mul__anon_3404__6107(uintptr_t, uintptr_t); +static nav__6108_39 mem_Allocator_allocBytesWithAlignment__anon_3406__6108(struct mem_Allocator__565, uintptr_t, uintptr_t); +static nav__4244_38 io_Reader_read__4244(struct io_Reader__2372, nav__4244_41); +static nav__6109_39 mem_sliceAsBytes__anon_3437__6109(nav__6109_39); +static uint16_t bincode_serializeInt__anon_3599__6113(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, uint32_t); +static uint16_t bincode_serializeStruct__anon_3604__6114(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, struct shared_Command__struct_2012__2012); +static uint16_t bincode_serializeStruct__anon_3605__6115(struct 
io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, struct shared_Command__struct_2013__2013); +static uint16_t posix_errno__anon_3616__6117(intptr_t); +static nav__4130_38 io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29___4130(void const *, nav__4130_41); +static uint16_t io_Writer_writeAll__6127(struct io_Writer__3718, nav__6127_40); +static uint16_t bincode_serialize__anon_3754__6136(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, nav__6136_40); +static nav__3984_38 array_list_ArrayListAligned_28u8_2cnull_29_appendWrite__3984(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *, nav__3984_42); +static nav__6126_38 io_Writer_write__6126(struct io_Writer__3718, nav__6126_41); +static uint16_t bincode_serializePointer__anon_3766__6137(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071, nav__6137_40); +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_appendSlice__3978(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *, nav__3978_41); +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_ensureUnusedCapacity__3997(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *, uintptr_t); +static void array_list_ArrayListAligned_28u8_2cnull_29_appendSliceAssumeCapacity__3979(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *, nav__3979_41); +static nav__3922_38 array_list_addOrOom__3922(uintptr_t, uintptr_t); +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_ensureTotalCapacity__3995(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *, uintptr_t); +static uintptr_t array_list_ArrayListAlignedUnmanaged_28u8_2cnull_29_growCapacity__6196(uintptr_t, uintptr_t); +static uint16_t 
array_list_ArrayListAligned_28u8_2cnull_29_ensureTotalCapacityPrecise__3996(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *, uintptr_t); +static nav__6223_39 mem_Allocator_remap__anon_3824__6223(struct mem_Allocator__565, nav__6223_39, uintptr_t); +static nav__6224_40 mem_Allocator_alignedAlloc__anon_3829__6224(struct mem_Allocator__565, uintptr_t); +static nav__6225_39 mem_bytesAsSlice__anon_3844__6225(nav__6225_39); +#define c_instrument_hooks_deinit__236 instrument_hooks_deinit +zig_extern void instrument_hooks_deinit(struct instruments_root_InstrumentHooks__547 *); +static void instruments_perf_PerfInstrument_deinit__740(struct instruments_perf_PerfInstrument__559 *); +static void mem_Allocator_destroy__anon_3862__6226(struct mem_Allocator__565, struct instruments_root_InstrumentHooks__547 *); +static void fifo_UnixPipe_Writer_deinit__1065(struct fifo_UnixPipe_Writer__600 *); +static void fifo_UnixPipe_Reader_deinit__1072(struct fifo_UnixPipe_Reader__602 *); +static void fs_File_close__1163(struct fs_File__608); +#define c_instrument_hooks_is_instrumented__237 instrument_hooks_is_instrumented +zig_extern bool instrument_hooks_is_instrumented(struct instruments_root_InstrumentHooks__547 *); +static zig_cold uint16_t instruments_perf_PerfInstrument_start_benchmark__743(struct instruments_perf_PerfInstrument__559 *); +#define c_instrument_hooks_start_benchmark__238 instrument_hooks_start_benchmark +zig_extern uint8_t instrument_hooks_start_benchmark(struct instruments_root_InstrumentHooks__547 *); +static bool features_is_feature_enabled__316(uint64_t); +static bool bit_set_IntegerBitSet_2864_29_isSet__351(uint64_t, uintptr_t); +static zig_cold uint16_t instruments_perf_PerfInstrument_stop_benchmark__744(struct instruments_perf_PerfInstrument__559 *); +#define c_instrument_hooks_stop_benchmark__239 instrument_hooks_stop_benchmark +zig_extern uint8_t instrument_hooks_stop_benchmark(struct instruments_root_InstrumentHooks__547 *); +static uint16_t 
instruments_perf_PerfInstrument_set_executed_benchmark__745(struct instruments_perf_PerfInstrument__559 *, uint32_t, uint8_t const *); +#define c_instrument_hooks_set_executed_benchmark__240 instrument_hooks_set_executed_benchmark +zig_extern uint8_t instrument_hooks_set_executed_benchmark(struct instruments_root_InstrumentHooks__547 *, uint32_t, uint8_t const *); +static nav__6231_39 mem_span__anon_3983__6231(uint8_t const *); +static uintptr_t mem_len__anon_3992__6232(uint8_t const *); +static uintptr_t mem_indexOfSentinel__anon_4000__6233(uint8_t const *); +static bool math_isPowerOfTwo__anon_4011__6234(void); +static nav__6599_38 simd_firstTrue__anon_4223__6599(nav__6599_40); +static uintptr_t mem_alignForward__anon_4226__6600(uintptr_t, uintptr_t); +static bool mem_isAligned__901(uintptr_t, uintptr_t); +static bool mem_isValidAlignGeneric__anon_4284__6601(uintptr_t); +static uintptr_t mem_alignBackward__anon_4285__6602(uintptr_t, uintptr_t); +static bool mem_isAlignedGeneric__anon_4291__6603(uint64_t, uint64_t); +static uint64_t mem_alignBackward__anon_4293__6604(uint64_t, uint64_t); +static bool mem_isValidAlignGeneric__anon_4295__6605(uint64_t); +static bool math_isPowerOfTwo__anon_4296__6606(uint64_t); +#define c_instrument_hooks_executed_benchmark__241 instrument_hooks_executed_benchmark +zig_extern uint8_t instrument_hooks_executed_benchmark(struct instruments_root_InstrumentHooks__547 *, uint32_t, uint8_t const *); +static uint16_t instruments_perf_PerfInstrument_set_integration__746(struct instruments_perf_PerfInstrument__559 *, uint8_t const *, uint8_t const *); +#define c_instrument_hooks_set_integration__242 instrument_hooks_set_integration +zig_extern uint8_t instrument_hooks_set_integration(struct instruments_root_InstrumentHooks__547 *, uint8_t const *, uint8_t const *); +static nav__6607_40 fmt_allocPrintZ__anon_4333__6607(struct mem_Allocator__565, nav__6607_43); +static void mem_Allocator_free__anon_4335__6608(struct mem_Allocator__565, 
nav__6608_40); +static nav__6609_40 fmt_allocPrint__anon_4360__6609(struct mem_Allocator__565, nav__6609_43); +static nav__6610_39 mem_sliceAsBytes__anon_4367__6610(nav__6610_39); +static uint16_t fmt_format__anon_4442__6652(struct io_Writer__3718, nav__6652_40); +static uint64_t fmt_count__anon_4374__6611(nav__6611_39); +static nav__6612_38 math_cast__anon_4376__6612(uint64_t); +static nav__6613_40 fmt_bufPrint__anon_4385__6613(nav__6613_39, nav__6613_42); +static struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 io_counting_writer_countingWriter__anon_4406__6638(void); +static struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_writer__6637(struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *); +static nav__6651_38 io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWr__6651(void const *, nav__6651_41); +static uint16_t fmt_formatType__anon_4770__6820(uint8_t const *, struct fmt_FormatOptions__4756, struct io_Writer__3718, uintptr_t); +static struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_writer__4195(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *); +static nav__6833_38 io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write__6833(void const *, nav__6833_41); +static nav__4203_39 
io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_getWritten__4203(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223); +static nav__6636_38 io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_write__6636(struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *, nav__6636_42); +static nav__6664_38 unicode_utf8ByteSequenceLength__6664(uint8_t); +static nav__6680_38 unicode_utf8CountCodepoints__6680(nav__6680_40); +static uint16_t fmt_formatBuf__anon_4870__6834(nav__6834_39, struct fmt_FormatOptions__4756, struct io_Writer__3718); +static nav__4198_38 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_write__4198(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *, nav__4198_42); +static nav__4108_38 io_dummyWrite__4108(nav__4108_40); +static nav__6670_38 unicode_utf8Decode__6670(nav__6670_40); +static nav__6663_38 unicode_utf8CodepointSequenceLength__6663(uint32_t); +static nav__6835_38 unicode_utf8EncodeImpl__anon_5001__6835(uint32_t, nav__6835_40); +static nav__6665_38 unicode_utf8Encode__6665(uint32_t, nav__6665_40); +static uint16_t io_Writer_writeBytesNTimes__6131(struct io_Writer__3718, nav__6131_40, uintptr_t); +static nav__6672_38 unicode_utf8Decode2__6672(nav__6672_40); +static nav__6674_38 unicode_utf8Decode3__6674(nav__6674_40); +static nav__6678_38 unicode_utf8Decode4__6678(nav__6678_40); +static bool unicode_isSurrogateCodepoint__6743(uint32_t); +static nav__6676_38 unicode_utf8Decode3AllowSurrogateHalf__6676(nav__6676_40); +static uint64_t const builtin_zig_backend__247; +static bool const start_simplified_logic__109; +static uint8_t const builtin_output_mode__248; +static uint8_t const builtin_link_mode__249; +static uint64_t features_features__314; +static uintptr_t const bit_set_IntegerBitSet_2864_29_bit_length__345; +static bool const builtin_link_libc__259; +static 
bool const posix_use_libc__1269; +static struct Target_Os__625 const builtin_os__255; +static uint8_t const c_native_os__1711; +static struct Target_DynamicLinker__801 const Target_DynamicLinker_none__3433; +static bool const builtin_is_test__251; +static uint8_t *heap_CAllocator_alloc__3557(void *, uintptr_t, uint8_t, uintptr_t); +static bool heap_CAllocator_resize__3558(void *, nav__3558_40, uint8_t, uintptr_t, uintptr_t); +static uint8_t *heap_CAllocator_remap__3559(void *, nav__3559_40, uint8_t, uintptr_t, uintptr_t); +static void heap_CAllocator_free__3560(void *, nav__3560_40, uint8_t, uintptr_t); +static uint8_t *heap_CAllocator_alignedAlloc__3554(uintptr_t, uint8_t); +static uintptr_t heap_CAllocator_alignedAllocSize__3556(uint8_t *); +static void heap_CAllocator_alignedFree__3555(uint8_t *); +static uintptr_t mem_Alignment_toByteUnits__1031(uint8_t); +static struct mem_Allocator__565 const heap_c_allocator__3522; +static struct mem_Allocator__565 const c_allocator__233; +static struct Target_Cpu_Feature_Set__817 const Target_Cpu_Feature_Set_empty__3478; +static struct Target_Cpu__786 const builtin_cpu__254; +static uint8_t const builtin_abi__253; +static uint8_t const builtin_object_format__257; +static struct Target__623 const builtin_target__256; +static struct builtin_CallingConvention__266 const builtin_CallingConvention_c__455; +zig_extern uint8_t running_on_valgrind(void); +static uint8_t const (*const shared_RUNNER_CTL_FIFO__3685)[21]; +static uint8_t const (*const shared_RUNNER_ACK_FIFO__3686)[21]; +static uint8_t const mem_native_endian__755; +static uint8_t const fs_path_native_os__3824; +static uint8_t const fs_native_os__1081; +static uint8_t const fs_Dir_native_os__3808; +static uint8_t const os_linux_native_arch__2617; +zig_extern int nanosleep(struct cimport_struct_timespec__2992 const *, struct cimport_struct_timespec__2992 *); +static int const cimport_EINTR__5842; +static uint8_t const builtin_mode__258; +static bool const 
debug_runtime_safety__159; +zig_extern int flock(int32_t, int); +static bool const fs_Dir_have_flock__3809; +static bool const fs_File_is_windows__1257; +static uint8_t const posix_native_os__1267; +zig_extern int clock_gettime(uint32_t, struct os_linux_timespec__struct_2818__2818 *); +zig_extern int faccessat(int32_t, uint8_t const *, unsigned int, unsigned int); +static bool const posix_lfs64_abi__1697; +zig_extern int openat64(int, uint8_t const *, uint32_t, ...); +zig_extern int close(int32_t); +zig_extern intptr_t write(int32_t, uint8_t const *, uintptr_t); +static uint8_t const c_native_abi__1709; +zig_extern int *zig_e___errno_location(void) zig_mangled(zig_e___errno_location, "__errno_location"); +static bool const posix_unexpected_error_tracing__1698; +zig_extern intptr_t read(int32_t, uint8_t *, uintptr_t); +zig_extern void callgrind_zero_stats(void); +zig_extern void callgrind_start_instrumentation(void); +zig_extern void callgrind_stop_instrumentation(void); +zig_extern void callgrind_dump_stats_at(uint8_t const *); +static bool const mem_backend_supports_vectors__783; +static bool const builtin_valgrind_support__262; +static bool const debug_default_enable_segfault_handler__205; +static uint8_t const log_default_level__6368; +static struct std_Options__4093 const std_options__97; +static nav__3538_38 const heap_page_size_min_default__3538; +static uintptr_t const heap_page_size_min__3517; +static uint16_t const fmt_max_format_args__6436; +static uint8_t const (*const fmt_ANY__6439)[4]; +static uint8_t const unicode_native_endian__6661; +static uint32_t const unicode_replacement_character__6662; +static struct mem_Allocator_VTable__568 const heap_CAllocator_vtable__3549; +zig_extern uintptr_t malloc_usable_size(void const *); +static bool const heap_CAllocator_supports_malloc_size__3550; +static bool const heap_CAllocator_supports_posix_memalign__3552; +zig_extern int posix_memalign(void **, uintptr_t, uintptr_t); +zig_extern void free(void *); +static 
struct Target_Cpu_Model__812 const Target_x86_cpu_tigerlake__3664; +enum { + zig_error_OutOfMemory = 1u, + zig_error_PermissionDenied = 2u, + zig_error_FileNotFound = 3u, + zig_error_NameTooLong = 4u, + zig_error_InputOutput = 5u, + zig_error_SystemResources = 6u, + zig_error_BadPathName = 7u, + zig_error_FileBusy = 8u, + zig_error_SymLinkLoop = 9u, + zig_error_ReadOnlyFileSystem = 10u, + zig_error_InvalidUtf8 = 11u, + zig_error_InvalidWtf8 = 12u, + zig_error_Unexpected = 13u, + zig_error_SharingViolation = 14u, + zig_error_PathAlreadyExists = 15u, + zig_error_AccessDenied = 16u, + zig_error_PipeBusy = 17u, + zig_error_NoDevice = 18u, + zig_error_NetworkNotFound = 19u, + zig_error_AntivirusInterference = 20u, + zig_error_ProcessFdQuotaExceeded = 21u, + zig_error_SystemFdQuotaExceeded = 22u, + zig_error_FileTooBig = 23u, + zig_error_IsDir = 24u, + zig_error_NoSpaceLeft = 25u, + zig_error_NotDir = 26u, + zig_error_DeviceBusy = 27u, + zig_error_FileLocksNotSupported = 28u, + zig_error_WouldBlock = 29u, + zig_error_DiskQuota = 30u, + zig_error_InvalidArgument = 31u, + zig_error_BrokenPipe = 32u, + zig_error_OperationAborted = 33u, + zig_error_NotOpenForWriting = 34u, + zig_error_LockViolation = 35u, + zig_error_ConnectionResetByPeer = 36u, + zig_error_ProcessNotFound = 37u, + zig_error_AckTimeout = 38u, + zig_error_ConnectionTimedOut = 39u, + zig_error_NotOpenForReading = 40u, + zig_error_SocketNotConnected = 41u, + zig_error_Canceled = 42u, + zig_error_EndOfStream = 43u, + zig_error_UnexpectedError = 44u, + zig_error_UnexpectedResponse = 45u, + zig_error_UnsupportedClock = 46u, + zig_error_Overflow = 47u, + zig_error_Utf8ExpectedContinuation = 48u, + zig_error_Utf8OverlongEncoding = 49u, + zig_error_Utf8EncodesSurrogateHalf = 50u, + zig_error_Utf8CodepointTooLarge = 51u, + zig_error_Utf8InvalidStartByte = 52u, + zig_error_TruncatedInput = 53u, + zig_error_Utf8CannotEncodeSurrogateHalf = 54u, + zig_error_CodepointTooLarge = 55u, +}; +static uint8_t const 
zig_errorName_OutOfMemory[12] = "OutOfMemory"; +static uint8_t const zig_errorName_PermissionDenied[17] = "PermissionDenied"; +static uint8_t const zig_errorName_FileNotFound[13] = "FileNotFound"; +static uint8_t const zig_errorName_NameTooLong[12] = "NameTooLong"; +static uint8_t const zig_errorName_InputOutput[12] = "InputOutput"; +static uint8_t const zig_errorName_SystemResources[16] = "SystemResources"; +static uint8_t const zig_errorName_BadPathName[12] = "BadPathName"; +static uint8_t const zig_errorName_FileBusy[9] = "FileBusy"; +static uint8_t const zig_errorName_SymLinkLoop[12] = "SymLinkLoop"; +static uint8_t const zig_errorName_ReadOnlyFileSystem[19] = "ReadOnlyFileSystem"; +static uint8_t const zig_errorName_InvalidUtf8[12] = "InvalidUtf8"; +static uint8_t const zig_errorName_InvalidWtf8[12] = "InvalidWtf8"; +static uint8_t const zig_errorName_Unexpected[11] = "Unexpected"; +static uint8_t const zig_errorName_SharingViolation[17] = "SharingViolation"; +static uint8_t const zig_errorName_PathAlreadyExists[18] = "PathAlreadyExists"; +static uint8_t const zig_errorName_AccessDenied[13] = "AccessDenied"; +static uint8_t const zig_errorName_PipeBusy[9] = "PipeBusy"; +static uint8_t const zig_errorName_NoDevice[9] = "NoDevice"; +static uint8_t const zig_errorName_NetworkNotFound[16] = "NetworkNotFound"; +static uint8_t const zig_errorName_AntivirusInterference[22] = "AntivirusInterference"; +static uint8_t const zig_errorName_ProcessFdQuotaExceeded[23] = "ProcessFdQuotaExceeded"; +static uint8_t const zig_errorName_SystemFdQuotaExceeded[22] = "SystemFdQuotaExceeded"; +static uint8_t const zig_errorName_FileTooBig[11] = "FileTooBig"; +static uint8_t const zig_errorName_IsDir[6] = "IsDir"; +static uint8_t const zig_errorName_NoSpaceLeft[12] = "NoSpaceLeft"; +static uint8_t const zig_errorName_NotDir[7] = "NotDir"; +static uint8_t const zig_errorName_DeviceBusy[11] = "DeviceBusy"; +static uint8_t const zig_errorName_FileLocksNotSupported[22] = 
"FileLocksNotSupported"; +static uint8_t const zig_errorName_WouldBlock[11] = "WouldBlock"; +static uint8_t const zig_errorName_DiskQuota[10] = "DiskQuota"; +static uint8_t const zig_errorName_InvalidArgument[16] = "InvalidArgument"; +static uint8_t const zig_errorName_BrokenPipe[11] = "BrokenPipe"; +static uint8_t const zig_errorName_OperationAborted[17] = "OperationAborted"; +static uint8_t const zig_errorName_NotOpenForWriting[18] = "NotOpenForWriting"; +static uint8_t const zig_errorName_LockViolation[14] = "LockViolation"; +static uint8_t const zig_errorName_ConnectionResetByPeer[22] = "ConnectionResetByPeer"; +static uint8_t const zig_errorName_ProcessNotFound[16] = "ProcessNotFound"; +static uint8_t const zig_errorName_AckTimeout[11] = "AckTimeout"; +static uint8_t const zig_errorName_ConnectionTimedOut[19] = "ConnectionTimedOut"; +static uint8_t const zig_errorName_NotOpenForReading[18] = "NotOpenForReading"; +static uint8_t const zig_errorName_SocketNotConnected[19] = "SocketNotConnected"; +static uint8_t const zig_errorName_Canceled[9] = "Canceled"; +static uint8_t const zig_errorName_EndOfStream[12] = "EndOfStream"; +static uint8_t const zig_errorName_UnexpectedError[16] = "UnexpectedError"; +static uint8_t const zig_errorName_UnexpectedResponse[19] = "UnexpectedResponse"; +static uint8_t const zig_errorName_UnsupportedClock[17] = "UnsupportedClock"; +static uint8_t const zig_errorName_Overflow[9] = "Overflow"; +static uint8_t const zig_errorName_Utf8ExpectedContinuation[25] = "Utf8ExpectedContinuation"; +static uint8_t const zig_errorName_Utf8OverlongEncoding[21] = "Utf8OverlongEncoding"; +static uint8_t const zig_errorName_Utf8EncodesSurrogateHalf[25] = "Utf8EncodesSurrogateHalf"; +static uint8_t const zig_errorName_Utf8CodepointTooLarge[22] = "Utf8CodepointTooLarge"; +static uint8_t const zig_errorName_Utf8InvalidStartByte[21] = "Utf8InvalidStartByte"; +static uint8_t const zig_errorName_TruncatedInput[15] = "TruncatedInput"; +static uint8_t const 
zig_errorName_Utf8CannotEncodeSurrogateHalf[30] = "Utf8CannotEncodeSurrogateHalf"; +static uint8_t const zig_errorName_CodepointTooLarge[18] = "CodepointTooLarge"; +static struct anon__lazy_57 const zig_errorName[56] = {{zig_errorName_OutOfMemory, 11ul}, {zig_errorName_PermissionDenied, 16ul}, {zig_errorName_FileNotFound, 12ul}, {zig_errorName_NameTooLong, 11ul}, {zig_errorName_InputOutput, 11ul}, {zig_errorName_SystemResources, 15ul}, {zig_errorName_BadPathName, 11ul}, {zig_errorName_FileBusy, 8ul}, {zig_errorName_SymLinkLoop, 11ul}, {zig_errorName_ReadOnlyFileSystem, 18ul}, {zig_errorName_InvalidUtf8, 11ul}, {zig_errorName_InvalidWtf8, 11ul}, {zig_errorName_Unexpected, 10ul}, {zig_errorName_SharingViolation, 16ul}, {zig_errorName_PathAlreadyExists, 17ul}, {zig_errorName_AccessDenied, 12ul}, {zig_errorName_PipeBusy, 8ul}, {zig_errorName_NoDevice, 8ul}, {zig_errorName_NetworkNotFound, 15ul}, {zig_errorName_AntivirusInterference, 21ul}, {zig_errorName_ProcessFdQuotaExceeded, 22ul}, {zig_errorName_SystemFdQuotaExceeded, 21ul}, {zig_errorName_FileTooBig, 10ul}, {zig_errorName_IsDir, 5ul}, {zig_errorName_NoSpaceLeft, 11ul}, {zig_errorName_NotDir, 6ul}, {zig_errorName_DeviceBusy, 10ul}, {zig_errorName_FileLocksNotSupported, 21ul}, {zig_errorName_WouldBlock, 10ul}, {zig_errorName_DiskQuota, 9ul}, {zig_errorName_InvalidArgument, 15ul}, {zig_errorName_BrokenPipe, 10ul}, {zig_errorName_OperationAborted, 16ul}, {zig_errorName_NotOpenForWriting, 17ul}, {zig_errorName_LockViolation, 13ul}, {zig_errorName_ConnectionResetByPeer, 21ul}, {zig_errorName_ProcessNotFound, 15ul}, {zig_errorName_AckTimeout, 10ul}, {zig_errorName_ConnectionTimedOut, 18ul}, {zig_errorName_NotOpenForReading, 17ul}, {zig_errorName_SocketNotConnected, 18ul}, {zig_errorName_Canceled, 8ul}, {zig_errorName_EndOfStream, 11ul}, {zig_errorName_UnexpectedError, 15ul}, {zig_errorName_UnexpectedResponse, 18ul}, {zig_errorName_UnsupportedClock, 16ul}, {zig_errorName_Overflow, 8ul}, 
{zig_errorName_Utf8ExpectedContinuation, 24ul}, {zig_errorName_Utf8OverlongEncoding, 20ul}, {zig_errorName_Utf8EncodesSurrogateHalf, 24ul}, {zig_errorName_Utf8CodepointTooLarge, 21ul}, {zig_errorName_Utf8InvalidStartByte, 20ul}, {zig_errorName_TruncatedInput, 14ul}, {zig_errorName_Utf8CannotEncodeSurrogateHalf, 29ul}, {zig_errorName_CodepointTooLarge, 17ul}}; + +static uint8_t const __anon_1890[21] = "/tmp/runner.ctl.fifo"; + +static uint8_t const __anon_1950[21] = "/tmp/runner.ack.fifo"; + +static uint8_t const __anon_2869[89] = "Invalid protocol detected: The stream end was found before all required bytes were read."; + +static uint8_t const __anon_4497[10] = "Metadata: "; + +static uint8_t const __anon_4778[1] = " "; + +static uint8_t const __anon_4798[1] = "\000"; + +static uint8_t const __anon_4969[3] = "\357\277\275"; + +static uint8_t const __anon_4843[4] = "any"; + +static uint8_t const __anon_5093[10] = "tigerlake"; + +void c_instrument_hooks_set_feature__234(uint64_t const a0, bool const a1) { + uint64_t t0; + t0 = a0; + features_set_feature__315(t0, a1); + return; +} + +static void features_set_feature__315(uint64_t const a0, bool const a1) { + uint64_t t0; + uintptr_t t1; + if (a1) { + t0 = a0; + t1 = t0; + bit_set_IntegerBitSet_2864_29_set__354(&features_features__314, t1); + goto zig_block_0; + } + t0 = a0; + t1 = t0; + bit_set_IntegerBitSet_2864_29_unset__356(&features_features__314, t1); + goto zig_block_0; + + zig_block_0:; + return; +} + +static void bit_set_IntegerBitSet_2864_29_set__354(uint64_t *const a0, uintptr_t const a1) { + uint64_t *const *t1; + uint64_t t2; + uint64_t t5; + uint64_t *t4; + uint64_t *t0; + bool t3; + t0 = a0; + t1 = (uint64_t *const *)&t0; + t2 = a1; + t3 = t2 < UINT64_C(64); + debug_assert__180(t3); + t4 = (*t1); + t4 = (uint64_t *)((uint8_t *)t4 + (uintptr_t)0ul); + t2 = (*t4); + t5 = bit_set_IntegerBitSet_2864_29_maskBit__375(a1); + t5 = t2 | t5; + (*t4) = t5; + return; +} + +static void 
bit_set_IntegerBitSet_2864_29_unset__356(uint64_t *const a0, uintptr_t const a1) { + uint64_t *const *t1; + uint64_t t2; + uint64_t t5; + uint64_t *t4; + uint64_t *t0; + bool t3; + t0 = a0; + t1 = (uint64_t *const *)&t0; + t2 = a1; + t3 = t2 < UINT64_C(64); + debug_assert__180(t3); + t4 = (*t1); + t4 = (uint64_t *)((uint8_t *)t4 + (uintptr_t)0ul); + t2 = (*t4); + t5 = bit_set_IntegerBitSet_2864_29_maskBit__375(a1); + t5 = zig_not_u64(t5, UINT8_C(64)); + t5 = t2 & t5; + (*t4) = t5; + return; +} + +static void debug_assert__180(bool const a0) { + bool t0; + t0 = !a0; + if (t0) { + zig_unreachable(); + } + goto zig_block_0; + + zig_block_0:; + return; +} + +static uint64_t bit_set_IntegerBitSet_2864_29_maskBit__375(uintptr_t const a0) { + uint64_t t1; + uint8_t t0; + t0 = (uint8_t)a0; + t1 = zig_shlw_u64(UINT64_C(1), t0, UINT8_C(64)); + return t1; +} + +static nav__1060_39 fifo_UnixPipe_openWrite__1060(struct mem_Allocator__565 const a0, nav__1060_42 const a1) { + nav__1060_39 t1; + struct fifo_UnixPipe_Writer__600 t4; + nav__1060_56 t2; + struct fs_File__608 t3; + uint16_t t0; + t0 = fs_accessAbsolute__1126(a1, (struct fs_File_OpenFlags__1897){UINT8_C(2),UINT8_C(0),false,false}); + if (t0) { + t1.payload = (struct fifo_UnixPipe_Writer__600){{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}}; + t1.error = t0; + return t1; + } + t2 = fs_openFileAbsolute__1123(a1, (struct fs_File_OpenFlags__1897){UINT8_C(2),UINT8_C(1),true,false}); + if (t2.error) { + t0 = t2.error; + t1.payload = (struct fifo_UnixPipe_Writer__600){{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}}; + t1.error = t0; + return t1; + } + t3 = t2.payload; + t4 = fifo_UnixPipe_Writer_init__1061(t3, a0); + t1.payload = t4; + t1.error = UINT16_C(0); + return t1; +} + +static nav__1059_39 fifo_UnixPipe_openRead__1059(struct 
mem_Allocator__565 const a0, nav__1059_42 const a1) { + nav__1059_39 t1; + struct fifo_UnixPipe_Reader__602 t4; + nav__1059_56 t2; + struct fs_File__608 t3; + uint16_t t0; + t0 = fs_accessAbsolute__1126(a1, (struct fs_File_OpenFlags__1897){UINT8_C(2),UINT8_C(0),false,false}); + if (t0) { + t1.payload = (struct fifo_UnixPipe_Reader__602){{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}}; + t1.error = t0; + return t1; + } + t2 = fs_openFileAbsolute__1123(a1, (struct fs_File_OpenFlags__1897){UINT8_C(2),UINT8_C(1),true,false}); + if (t2.error) { + t0 = t2.error; + t1.payload = (struct fifo_UnixPipe_Reader__602){{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}}; + t1.error = t0; + return t1; + } + t3 = t2.payload; + t4 = fifo_UnixPipe_Reader_init__1066(t3, a0); + t1.payload = t4; + t1.error = UINT16_C(0); + return t1; +} + +static nav__739_39 instruments_perf_PerfInstrument_init__739(struct mem_Allocator__565 const a0) { + struct instruments_perf_PerfInstrument__559 *t1; + struct mem_Allocator__565 *t2; + struct fifo_UnixPipe_Writer__600 *t3; + nav__739_61 t4; + nav__739_39 t6; + nav__739_39 t0; + struct fifo_UnixPipe_Writer__600 t7; + struct fifo_UnixPipe_Reader__602 *t8; + nav__739_66 t9; + struct fifo_UnixPipe_Reader__602 t10; + uint16_t t5; + t0.error = UINT16_C(0); + t1 = &t0.payload; + t2 = (struct mem_Allocator__565 *)&t1->allocator; + (*t2) = a0; + t3 = (struct fifo_UnixPipe_Writer__600 *)&t1->writer; + t4 = fifo_UnixPipe_openWrite__1060(a0, (nav__739_59){(uint8_t const *)&__anon_1890,(uintptr_t)20ul}); + if (t4.error) { + t5 = t4.error; + t6.payload = (struct instruments_perf_PerfInstrument__559){{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct 
mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}},{{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}}}; + t6.error = t5; + return t6; + } + t7 = t4.payload; + (*t3) = t7; + t8 = (struct fifo_UnixPipe_Reader__602 *)&t1->reader; + t9 = fifo_UnixPipe_openRead__1059(a0, (nav__739_59){(uint8_t const *)&__anon_1950,(uintptr_t)20ul}); + if (t9.error) { + t5 = t9.error; + t6.payload = (struct instruments_perf_PerfInstrument__559){{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}},{{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)},{-INT32_C(0x55555556)}}}; + t6.error = t5; + return t6; + } + t10 = t9.payload; + (*t8) = t10; + return t0; +} + +static nav__715_39 instruments_root_InstrumentHooks_init__715(struct mem_Allocator__565 const a0) { + struct instruments_valgrind_ValgrindInstrument__554 t2; + struct instruments_root_InstrumentHooks__547 t3; + nav__715_39 t4; + struct instruments_perf_PerfInstrument__559 t6; + struct instruments_perf_PerfInstrument__559 t8; + struct instruments_perf_PerfInstrument__559 t5; + nav__715_60 t7; + uint8_t t0; + bool t1; + t0 = running_on_valgrind(); + t1 = t0 > UINT8_C(0); + if (t1) { + t2 = instruments_valgrind_ValgrindInstrument_init__727(a0); + t3.tag = UINT8_C(0); + t3.payload.valgrind = t2; + t4.payload = t3; + t4.error = UINT16_C(0); + return t4; + } + goto zig_block_0; + + zig_block_0:; + t7 = instruments_perf_PerfInstrument_init__739(a0); + t1 = t7.error == UINT16_C(0); + if (t1) { + t8 = t7.payload; + t6 = t8; + goto zig_block_1; + } + return (nav__715_39){{{{{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct 
mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)}}},UINT8_C(2)},0}; + + zig_block_1:; + t5 = t6; + t1 = instruments_perf_PerfInstrument_is_instrumented__742(&t5); + if (t1) { + t6 = t5; + t3.tag = UINT8_C(1); + t3.payload.perf = t6; + t4.payload = t3; + t4.error = UINT16_C(0); + return t4; + } + goto zig_block_2; + + zig_block_2:; + return (nav__715_39){{{{{((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul)}}},UINT8_C(2)},0}; +} + +struct instruments_root_InstrumentHooks__547 *c_instrument_hooks_init__235(void) { + struct instruments_root_InstrumentHooks__547 *t0; + struct instruments_root_InstrumentHooks__547 *t3; + nav__235_46 t1; + nav__235_71 t4; + struct instruments_root_InstrumentHooks__547 t5; + bool t2; + t1 = mem_Allocator_create__anon_858__3561((struct mem_Allocator__565){((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)&heap_CAllocator_vtable__3549)}); + t2 = t1.error == UINT16_C(0); + if (t2) { + t3 = t1.payload; + t0 = t3; + goto zig_block_0; + } + return NULL; + + zig_block_0:; + t4 = instruments_root_InstrumentHooks_init__715((struct mem_Allocator__565){((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)&heap_CAllocator_vtable__3549)}); + t5 = t4.payload; + (*t0) = t5; + t3 = (struct instruments_root_InstrumentHooks__547 *)t0; + return t3; +} + +static nav__3561_40 mem_Allocator_create__anon_858__3561(struct mem_Allocator__565 const a0) { + struct mem_Allocator__565 const *t1; + struct mem_Allocator__565 t2; + struct mem_Allocator__565 t0; + uintptr_t t3; + nav__3561_51 t4; + nav__3561_40 t6; + uint8_t *t7; + struct instruments_root_InstrumentHooks__547 *t8; + uint16_t t5; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = (*t1); + t3 = (uintptr_t)zig_return_address(); + t4 = mem_Allocator_allocBytesWithAlignment__anon_1979__3810(t2, (uintptr_t)72ul, t3); + if (t4.error) { + t5 = 
t4.error; + t6.payload = ((struct instruments_root_InstrumentHooks__547 *)(uintptr_t)0xaaaaaaaaaaaaaaaaul); + t6.error = t5; + return t6; + } + t7 = t4.payload; + t8 = (struct instruments_root_InstrumentHooks__547 *)t7; + t6.payload = t8; + t6.error = UINT16_C(0); + return t6; +} + +static struct instruments_valgrind_ValgrindInstrument__554 instruments_valgrind_ValgrindInstrument_init__727(struct mem_Allocator__565 const a0) { + struct instruments_valgrind_ValgrindInstrument__554 t0; + t0.allocator = a0; + return t0; +} + +static uint16_t fs_accessAbsolute__1126(nav__1126_39 const a0, struct fs_File_OpenFlags__1897 const a1) { + struct fs_Dir__1899 const *t3; + struct fs_Dir__1899 t1; + struct fs_Dir__1899 t2; + uint16_t t4; + bool t0; + t0 = fs_path_isAbsolute__3845(a0); + debug_assert__180(t0); + t1 = fs_cwd__1118(); + t2 = t1; + t3 = (struct fs_Dir__1899 const *)&t2; + t1 = (*t3); + t4 = fs_Dir_access__3766(t1, a0, a1); + if (t4) { + return t4; + } + return 0; +} + +static nav__1123_39 fs_openFileAbsolute__1123(nav__1123_41 const a0, struct fs_File_OpenFlags__1897 const a1) { + struct fs_Dir__1899 const *t3; + struct fs_Dir__1899 t1; + struct fs_Dir__1899 t2; + nav__1123_39 t4; + bool t0; + t0 = fs_path_isAbsolute__3845(a0); + debug_assert__180(t0); + t1 = fs_cwd__1118(); + t2 = t1; + t3 = (struct fs_Dir__1899 const *)&t2; + t1 = (*t3); + t4 = fs_Dir_openFile__3702(t1, a0, a1); + return t4; +} + +static struct fifo_UnixPipe_Writer__600 fifo_UnixPipe_Writer_init__1061(struct fs_File__608 const a0, struct mem_Allocator__565 const a1) { + struct fs_File__608 *t1; + struct mem_Allocator__565 *t2; + struct fifo_UnixPipe_Writer__600 t0; + t1 = (struct fs_File__608 *)&t0.file; + (*t1) = a0; + t2 = (struct mem_Allocator__565 *)&t0.allocator; + (*t2) = a1; + return t0; +} + +static struct fifo_UnixPipe_Reader__602 fifo_UnixPipe_Reader_init__1066(struct fs_File__608 const a0, struct mem_Allocator__565 const a1) { + struct fs_File__608 *t1; + struct mem_Allocator__565 *t2; 
+ struct fifo_UnixPipe_Reader__602 t0; + t1 = (struct fs_File__608 *)&t0.file; + (*t1) = a0; + t2 = (struct mem_Allocator__565 *)&t0.allocator; + (*t2) = a1; + return t0; +} + +static uint16_t fifo_UnixPipe_Writer_sendCmd__1064(struct fifo_UnixPipe_Writer__600 *const a0, struct shared_Command__2009 const a1) { + struct fifo_UnixPipe_Writer__600 *const *t1; + struct mem_Allocator__565 *t3; + struct mem_Allocator__565 t4; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t5; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t2; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t6; + nav__1064_55 t8; + struct fifo_UnixPipe_Writer__600 *t9; + struct fifo_UnixPipe_Writer__600 *t0; + struct fs_File__608 *t10; + uintptr_t t12; + uint32_t const *t15; + uint8_t const (*t16)[4]; + nav__1064_44 t17; + struct fs_File__608 t11; + uint32_t t13; + uint32_t t14; + uint16_t t7; + t0 = a0; + t1 = (struct fifo_UnixPipe_Writer__600 *const *)&t0; + t3 = (struct mem_Allocator__565 *)&a0->allocator; + t4 = (*t3); + t5 = array_list_ArrayListAligned_28u8_2cnull_29_init__3958(t4); + t2 = t5; + t6 = array_list_ArrayListAligned_28u8_2cnull_29_writer__3983(&t2); + t7 = bincode_serialize__anon_2081__4131(t6, a1); + if (t7) { + t5 = t2; + array_list_ArrayListAligned_28u8_2cnull_29_deinit__3960(t5); + return t7; + } + t5 = t2; + t8 = t5.items; + t9 = (*t1); + t10 = (struct fs_File__608 *)&t9->file; + t11 = (*t10); + t12 = t8.len; + t13 = (uint32_t)t12; + t14 = t13; + t15 = (uint32_t const *)&t14; + t16 = mem_asBytes__anon_2122__4132(t15); + t17.ptr = &(*t16)[(uintptr_t)0ul]; + t17.len = (uintptr_t)4ul; + t7 = fs_File_writeAll__1217(t11, t17); + if (t7) { + t5 = t2; + array_list_ArrayListAligned_28u8_2cnull_29_deinit__3960(t5); + return t7; + } + t9 = (*t1); + t10 = (struct fs_File__608 *)&t9->file; + t11 = (*t10); + memcpy(&t17, &t8, sizeof(nav__1064_44)); + t7 = 
fs_File_writeAll__1217(t11, t17); + if (t7) { + t5 = t2; + array_list_ArrayListAligned_28u8_2cnull_29_deinit__3960(t5); + return t7; + } + t5 = t2; + array_list_ArrayListAligned_28u8_2cnull_29_deinit__3960(t5); + return 0; +} + +static nav__4242_38 bincode_deserializeInt__anon_2357__4242(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0) { + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *t1; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *t4; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t2; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t0; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t3; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *const *t5; + void const **t7; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t8; + void const *t9; + nav__4242_50 (**t10)(void const *, nav__4242_52); + struct io_Reader__2372 t11; + struct io_Reader__2372 t6; + struct io_Reader__2372 t12; + struct io_Reader__2372 t15; + struct io_Reader__2372 const *t13; + uint8_t const (*t21)[4]; + nav__4242_38 t14; + nav__4242_38 t18; + uint32_t t22; + nav__4242_64 t16; + uint16_t t17; + uint8_t t19[4]; + uint8_t t20[4]; + t0 = a0; + t1 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *)&t0; + t2 = (*t1); + t3 = t2; 
+ t1 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *)&t3; + t4 = t1; + t5 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *const *)&t4; + t7 = (void const **)&t6.context; + t1 = (*t5); + t8 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)&t1->context; + t9 = (void const *)t8; + (*t7) = t9; + t10 = (nav__4242_50 (**)(void const *, nav__4242_52))&t6.readFn; + (*t10) = &io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29_typeEras__4238; + t11 = t6; + t12 = t11; + t13 = (struct io_Reader__2372 const *)&t12; + t11 = (*t13); + t15 = t11; + t13 = (struct io_Reader__2372 const *)&t15; + t11 = (*t13); + t16 = io_Reader_readBytesNoEof__anon_2403__4280(t11); + if (t16.error) { + t17 = t16.error; + t18.payload = UINT32_C(0xaaaaaaaa); + t18.error = t17; + t14 = t18; + goto zig_block_0; + } + memcpy(t19, t16.payload, sizeof(uint8_t[4])); + memcpy((char *)&t20, t19, sizeof(uint8_t[4])); + t21 = (uint8_t const (*)[4])&t20; + memcpy(t19, (const char *)t21, sizeof(uint8_t[4])); + memcpy(&t22, &t19, sizeof(uint32_t)); + t22 = zig_wrap_u32(t22, UINT8_C(32)); + t18.payload = t22; + t18.error = UINT16_C(0); + t14 = t18; + goto zig_block_0; + + zig_block_0:; + memcpy(&t18, &t14, sizeof(nav__4242_38)); + if (t18.error) { + t17 = t18.error; + t18.payload = UINT32_C(0xaaaaaaaa); + t18.error = t17; + return t18; + } + t22 = t18.payload; + t18.payload = t22; + t18.error = UINT16_C(0); + return t18; +} + +static nav__4241_38 bincode_deserializeAlloc__anon_2346__4241(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4241_38 t0; + uint32_t t2; + uint16_t t1; + 
(void)a1; + t0 = bincode_deserializeInt__anon_2357__4242(a0); + if (t0.error) { + t1 = t0.error; + t0.payload = UINT32_C(0xaaaaaaaa); + t0.error = t1; + return t0; + } + t2 = t0.payload; + t0.payload = t2; + t0.error = UINT16_C(0); + return t0; +} + +static nav__4284_40 bincode_deserializePointerAlloc__anon_2483__4284(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *t1; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *t6; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *t7; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *t32; + struct mem_Allocator__565 const *t3; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t4; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t0; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t5; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t31; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *const *t8; + void const **t10; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t11; + void const *t12; + 
nav__4284_59 (**t13)(void const *, nav__4284_39); + struct io_Reader__2372 t14; + struct io_Reader__2372 t9; + struct io_Reader__2372 t15; + struct io_Reader__2372 t18; + struct io_Reader__2372 t33; + struct io_Reader__2372 t34; + struct io_Reader__2372 const *t16; + nav__4284_69 t17; + nav__4284_69 t21; + uint8_t const (*t24)[8]; + uint64_t t25; + uint64_t t38; + nav__4284_40 t26; + uintptr_t t27; + uintptr_t t37; + struct mem_Allocator__565 t29; + struct mem_Allocator__565 t2; + nav__4284_39 t30; + nav__4284_39 t28; + nav__4284_59 t35; + nav__4284_59 t36; + nav__4284_72 t19; + uint16_t t20; + uint8_t t22[8]; + uint8_t t23[8]; + bool t39; + t0 = a0; + t1 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *)&t0; + t2 = a1; + t3 = (struct mem_Allocator__565 const *)&t2; + t4 = (*t1); + t5 = t4; + t6 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *)&t5; + t7 = t6; + t8 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *const *)&t7; + t10 = (void const **)&t9.context; + t6 = (*t8); + t11 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)&t6->context; + t12 = (void const *)t11; + (*t10) = t12; + t13 = (nav__4284_59 (**)(void const *, nav__4284_39))&t9.readFn; + (*t13) = &io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29_typeEras__4238; + t14 = t9; + t15 = t14; + t16 = (struct io_Reader__2372 const *)&t15; + t14 = (*t16); + t18 = t14; + t16 = (struct io_Reader__2372 const *)&t18; + t14 = (*t16); + t19 = io_Reader_readBytesNoEof__anon_2496__4285(t14); + if (t19.error) { + t20 = t19.error; + t21.payload = UINT64_C(0xaaaaaaaaaaaaaaaa); + t21.error = t20; + t17 = t21; + goto zig_block_0; 
+ } + memcpy(t22, t19.payload, sizeof(uint8_t[8])); + memcpy((char *)&t23, t22, sizeof(uint8_t[8])); + t24 = (uint8_t const (*)[8])&t23; + memcpy(t22, (const char *)t24, sizeof(uint8_t[8])); + memcpy(&t25, &t22, sizeof(uint64_t)); + t25 = zig_wrap_u64(t25, UINT8_C(64)); + t21.payload = t25; + t21.error = UINT16_C(0); + t17 = t21; + goto zig_block_0; + + zig_block_0:; + memcpy(&t21, &t17, sizeof(nav__4284_69)); + if (t21.error) { + t20 = t21.error; + t26.payload = (nav__4284_39){(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t26.error = t20; + return t26; + } + t25 = t21.payload; + t27 = t25; + t29 = (*t3); + t26 = mem_Allocator_alloc__anon_2204__4173(t29, t27); + if (t26.error) { + t20 = t26.error; + t26.payload = (nav__4284_39){(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t26.error = t20; + return t26; + } + t30 = t26.payload; + t28 = t30; + t4 = (*t1); + t30 = t28; + t31 = t4; + t1 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *)&t31; + t32 = t1; + t8 = (struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const *const *)&t32; + t10 = (void const **)&t33.context; + t1 = (*t8); + t11 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)&t1->context; + t12 = (void const *)t11; + (*t10) = t12; + t13 = (nav__4284_59 (**)(void const *, nav__4284_39))&t33.readFn; + (*t13) = &io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29_typeEras__4238; + t14 = t33; + t34 = t14; + t16 = (struct io_Reader__2372 const *)&t34; + t14 = (*t16); + t35 = io_Reader_readAll__4245(t14, t30); + memcpy(&t36, &t35, sizeof(nav__4284_59)); + t37 = t36.payload; + t25 = t37; + t38 = t27; + t39 = t25 != t38; + if (t39) { + 
bincode_invalidProtocol__anon_2520__4286(); + zig_unreachable(); + } + goto zig_block_1; + + zig_block_1:; + t30 = t28; + t26.payload = t30; + t26.error = UINT16_C(0); + return t26; +} + +static nav__4283_40 bincode_deserializeAlloc__anon_2470__4283(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4283_57 t0; + nav__4283_40 t2; + nav__4283_55 t3; + nav__4283_39 t4; + uint16_t t1; + t0 = bincode_deserializePointerAlloc__anon_2483__4284(a0, a1); + if (t0.error) { + t1 = t0.error; + t2.payload = (nav__4283_39){(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t2.error = t1; + return t2; + } + t3 = t0.payload; + memcpy(&t4, &t3, sizeof(nav__4283_39)); + t2.payload = t4; + t2.error = UINT16_C(0); + return t2; +} + +static nav__4282_39 bincode_deserializeStructAlloc__anon_2445__4282(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + uint32_t *t1; + nav__4282_39 t4; + nav__4282_44 *t6; + nav__4282_60 t7; + nav__4282_44 t8; + struct shared_Command__struct_2012__2012 t9; + struct shared_Command__struct_2012__2012 t0; + nav__4282_57 t2; + uint32_t t5; + uint16_t t3; + t1 = (uint32_t *)&t0.pid; + t2 = bincode_deserializeAlloc__anon_2346__4241(a0, a1); + if (t2.error) { + t3 = t2.error; + t4.payload = (struct shared_Command__struct_2012__2012){{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}; + t4.error = t3; + return t4; + } + t5 = t2.payload; + (*t1) = t5; + t6 = (nav__4282_44 *)&t0.uri; + t7 = bincode_deserializeAlloc__anon_2470__4283(a0, a1); + if (t7.error) { + t3 = t7.error; + t4.payload = (struct shared_Command__struct_2012__2012){{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, 
(uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}; + t4.error = t3; + return t4; + } + t8 = t7.payload; + (*t6) = t8; + t9 = t0; + t4.payload = t9; + t4.error = UINT16_C(0); + return t4; +} + +static nav__4281_39 bincode_deserializeAlloc__anon_2416__4281(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4281_39 t0; + struct shared_Command__struct_2012__2012 t2; + uint16_t t1; + t0 = bincode_deserializeStructAlloc__anon_2445__4282(a0, a1); + if (t0.error) { + t1 = t0.error; + t0.payload = (struct shared_Command__struct_2012__2012){{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}; + t0.error = t1; + return t0; + } + t2 = t0.payload; + t0.payload = t2; + t0.error = UINT16_C(0); + return t0; +} + +static uint16_t bincode_deserializeAlloc__anon_2535__4287(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + (void)a0; + (void)a1; + return 0; +} + +static nav__4289_39 bincode_deserializeStructAlloc__anon_2597__4289(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4289_44 *t1; + nav__4289_57 t2; + nav__4289_39 t4; + nav__4289_44 t5; + struct shared_Command__struct_2013__2013 t6; + struct shared_Command__struct_2013__2013 t0; + uint16_t t3; + t1 = (nav__4289_44 *)&t0.name; + t2 = bincode_deserializeAlloc__anon_2470__4283(a0, a1); + if (t2.error) { + t3 = t2.error; + t4.payload = (struct shared_Command__struct_2013__2013){{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}}; + t4.error = 
t3; + return t4; + } + t5 = t2.payload; + (*t1) = t5; + t1 = (nav__4289_44 *)&t0.version; + t2 = bincode_deserializeAlloc__anon_2470__4283(a0, a1); + if (t2.error) { + t3 = t2.error; + t4.payload = (struct shared_Command__struct_2013__2013){{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}}; + t4.error = t3; + return t4; + } + t5 = t2.payload; + (*t1) = t5; + t6 = t0; + t4.payload = t6; + t4.error = UINT16_C(0); + return t4; +} + +static nav__4288_39 bincode_deserializeAlloc__anon_2578__4288(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4288_39 t0; + struct shared_Command__struct_2013__2013 t2; + uint16_t t1; + t0 = bincode_deserializeStructAlloc__anon_2597__4289(a0, a1); + if (t0.error) { + t1 = t0.error; + t0.payload = (struct shared_Command__struct_2013__2013){{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}}; + t0.error = t1; + return t0; + } + t2 = t0.payload; + t0.payload = t2; + t0.error = UINT16_C(0); + return t0; +} + +static nav__4240_39 bincode_deserializeUnionAlloc__anon_2341__4240(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4240_39 t2; + nav__4240_63 t6; + struct shared_Command__struct_2012__2012 t7; + struct shared_Command__2009 t8; + nav__4240_65 t9; + struct shared_Command__struct_2013__2013 t10; + nav__4240_61 t0; + uint32_t t3; + uint16_t t1; + uint8_t t4; + bool t5; + t0 = bincode_deserializeAlloc__anon_2346__4241(a0, a1); + if (t0.error) { + t1 = t0.error; + t2.payload = (struct shared_Command__2009){{{{(uint8_t const 
*)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t2.error = t1; + return t2; + } + t3 = t0.payload; + t4 = (uint8_t)t3; + t5 = t4 == UINT8_C(0); + if (t5) { + t6 = bincode_deserializeAlloc__anon_2416__4281(a0, a1); + if (t6.error) { + t1 = t6.error; + t2.payload = (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t2.error = t1; + return t2; + } + t7 = t6.payload; + t8.tag = UINT8_C(0); + t8.payload.ExecutedBenchmark = t7; + t2.payload = t8; + t2.error = UINT16_C(0); + return t2; + } + goto zig_block_0; + + zig_block_0:; + t5 = t4 == UINT8_C(1); + if (t5) { + (void)bincode_deserializeAlloc__anon_2535__4287(a0, a1); + return (nav__4240_39){{{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(1)},0}; + } + goto zig_block_1; + + zig_block_1:; + t5 = t4 == UINT8_C(2); + if (t5) { + (void)bincode_deserializeAlloc__anon_2535__4287(a0, a1); + return (nav__4240_39){{{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(2)},0}; + } + goto zig_block_2; + + zig_block_2:; + t5 = t4 == UINT8_C(3); + if (t5) { + (void)bincode_deserializeAlloc__anon_2535__4287(a0, a1); + return (nav__4240_39){{{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(3)},0}; + } + goto zig_block_3; + + zig_block_3:; + t5 = t4 == UINT8_C(4); + if (t5) { + (void)bincode_deserializeAlloc__anon_2535__4287(a0, a1); + return (nav__4240_39){{{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(4)},0}; + } + goto zig_block_4; + + zig_block_4:; + t5 = t4 == UINT8_C(5); + if (t5) { + t9 = bincode_deserializeAlloc__anon_2578__4288(a0, a1); + if (t9.error) { + t1 = t9.error; + t2.payload = (struct 
shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t2.error = t1; + return t2; + } + t10 = t9.payload; + t8.tag = UINT8_C(5); + t8.payload.SetIntegration = t10; + t2.payload = t8; + t2.error = UINT16_C(0); + return t2; + } + goto zig_block_5; + + zig_block_5:; + t5 = t4 == UINT8_C(6); + if (t5) { + (void)bincode_deserializeAlloc__anon_2535__4287(a0, a1); + return (nav__4240_39){{{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(6)},0}; + } + goto zig_block_6; + + zig_block_6:; + zig_unreachable(); +} + +static nav__4239_39 bincode_deserializeAlloc__anon_2254__4239(struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 const a0, struct mem_Allocator__565 const a1) { + nav__4239_39 t0; + struct shared_Command__2009 t2; + uint16_t t1; + t0 = bincode_deserializeUnionAlloc__anon_2341__4240(a0, a1); + if (t0.error) { + t1 = t0.error; + t0.payload = (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t0.error = t1; + return t0; + } + t2 = t0.payload; + t0.payload = t2; + t0.error = UINT16_C(0); + return t0; +} + +static nav__1069_39 fifo_UnixPipe_Reader_recvCmd__1069(struct fifo_UnixPipe_Reader__602 *const a0) { + struct fifo_UnixPipe_Reader__602 *const *t1; + struct fifo_UnixPipe_Reader__602 *t3; + struct fifo_UnixPipe_Reader__602 *t0; + struct fs_File__608 *t4; + nav__1069_66 t6; + nav__1069_69 t7; + nav__1069_39 t9; + uint8_t const (*t10)[4]; + struct mem_Allocator__565 *t13; + struct mem_Allocator__565 t14; + uintptr_t t15; + nav__1069_73 t16; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 t19; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 t18; + struct 
io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t20; + struct shared_Command__2009 t21; + struct fs_File__608 t5; + uint32_t t12; + uint16_t t8; + uint8_t t11[4]; + uint8_t t2[4]; + bool t17; + t0 = a0; + t1 = (struct fifo_UnixPipe_Reader__602 *const *)&t0; + t3 = (*t1); + t4 = (struct fs_File__608 *)&t3->file; + t5 = (*t4); + t6.ptr = &t2[(uintptr_t)0ul]; + t6.len = (uintptr_t)4ul; + t7 = fs_File_readAll__1207(t5, t6); + if (t7.error) { + t8 = t7.error; + t9.payload = (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t9.error = t8; + return t9; + } + t10 = (uint8_t const (*)[4])&t2; + memcpy(t11, (const char *)t10, sizeof(uint8_t[4])); + memcpy(&t12, &t11, sizeof(uint32_t)); + t12 = zig_wrap_u32(t12, UINT8_C(32)); + t3 = (*t1); + t13 = (struct mem_Allocator__565 *)&t3->allocator; + t14 = (*t13); + t15 = (uintptr_t)t12; + t16 = mem_Allocator_alloc__anon_2204__4173(t14, t15); + if (t16.error) { + t8 = t16.error; + t9.payload = (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t9.error = t8; + return t9; + } + t6 = t16.payload; + zig_loop_36: + t3 = (*t1); + t4 = (struct fs_File__608 *)&t3->file; + t5 = (*t4); + t7 = fs_File_readAll__1207(t5, t6); + t17 = t7.error == UINT16_C(0); + if (t17) { + goto zig_block_2; + } + goto zig_block_1; + + zig_block_2:; + goto zig_block_0; + + zig_block_1:; + goto zig_loop_36; + + zig_block_0:; + t19 = io_fixed_buffer_stream_fixedBufferStream__anon_2226__4205(t6); + t18 = t19; + t20 = io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_reader__4194(&t18); + t13 = (struct mem_Allocator__565 *)&a0->allocator; + t14 = (*t13); + t9 = bincode_deserializeAlloc__anon_2254__4239(t20, t14); + if (t9.error) { + t8 = t9.error; + t3 = (*t1); + t13 = 
(struct mem_Allocator__565 *)&t3->allocator; + t14 = (*t13); + mem_Allocator_free__anon_2631__4290(t14, t6); + t9.payload = (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)}; + t9.error = t8; + return t9; + } + t21 = t9.payload; + t3 = (*t1); + t13 = (struct mem_Allocator__565 *)&t3->allocator; + t14 = (*t13); + mem_Allocator_free__anon_2631__4290(t14, t6); + t9.payload = t21; + t9.error = UINT16_C(0); + return t9; +} + +static nav__1070_39 fifo_UnixPipe_Reader_waitForResponse__1070(struct fifo_UnixPipe_Reader__602 *const a0, nav__1070_42 const a1) { + zig_i128 t2; + zig_i128 t6; + struct fifo_UnixPipe_Reader__602 *const *t1; + uint64_t t3; + uint64_t t5; + struct shared_Command__2009 t7; + struct shared_Command__2009 t10; + struct fifo_UnixPipe_Reader__602 *t8; + struct fifo_UnixPipe_Reader__602 *t0; + nav__1070_39 t9; + bool t4; + t0 = a0; + t1 = (struct fifo_UnixPipe_Reader__602 *const *)&t0; + t2 = time_nanoTimestamp__4145(); + t4 = a1.is_null != true; + if (t4) { + t5 = a1.payload; + t3 = t5; + goto zig_block_0; + } + t3 = UINT64_C(5000000000); + goto zig_block_0; + + zig_block_0:; + zig_loop_13: + t6 = time_nanoTimestamp__4145(); + t6 = zig_sub_i128(t6, t2); + t5 = zig_lo_i128(t6); + t4 = t5 > t3; + if (t4) { + return (nav__1070_39){{{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(0x2)},zig_error_AckTimeout}; + } + goto zig_block_2; + + zig_block_2:; + t8 = (*t1); + t9 = fifo_UnixPipe_Reader_recvCmd__1069(t8); + t4 = t9.error == UINT16_C(0); + if (t4) { + t10 = t9.payload; + t7 = t10; + goto zig_block_3; + } + utils_sleep__4295(UINT64_C(10000000)); + goto zig_block_1; + + zig_block_3:; + t9.payload = t7; + t9.error = UINT16_C(0); + return t9; + + zig_block_1:; + goto zig_loop_13; +} + +static uint16_t fifo_UnixPipe_Reader_waitForAck__1071(struct fifo_UnixPipe_Reader__602 *const a0, 
nav__1071_40 const a1) { + struct fifo_UnixPipe_Reader__602 *const *t1; + struct fifo_UnixPipe_Reader__602 *t2; + struct fifo_UnixPipe_Reader__602 *t0; + nav__1071_55 t3; + struct shared_Command__2009 t5; + struct shared_Command__2009 t6; + struct shared_Command__2009 const *t7; + struct mem_Allocator__565 *t10; + struct mem_Allocator__565 t11; + uint16_t t4; + uint8_t t8; + bool t9; + t0 = a0; + t1 = (struct fifo_UnixPipe_Reader__602 *const *)&t0; + t2 = (*t1); + t3 = fifo_UnixPipe_Reader_waitForResponse__1070(t2, a1); + if (t3.error) { + t4 = t3.error; + return t4; + } + t5 = t3.payload; + t6 = t5; + t7 = (struct shared_Command__2009 const *)&t6; + t8 = t5.tag; + t9 = t8 == UINT8_C(3); + if (t9) { + t5 = (*t7); + t10 = (struct mem_Allocator__565 *)&a0->allocator; + t11 = (*t10); + shared_Command_deinit__3908(t5, t11); + return 0; + } + t8 = t5.tag; + t9 = t8 == UINT8_C(6); + if (t9) { + t5 = (*t7); + t10 = (struct mem_Allocator__565 *)&a0->allocator; + t11 = (*t10); + shared_Command_deinit__3908(t5, t11); + return zig_error_UnexpectedError; + } + t5 = (*t7); + t10 = (struct mem_Allocator__565 *)&a0->allocator; + t11 = (*t10); + shared_Command_deinit__3908(t5, t11); + return zig_error_UnexpectedResponse; +} + +static uint16_t instruments_perf_PerfInstrument_send_cmd__741(struct instruments_perf_PerfInstrument__559 *const a0, struct shared_Command__2009 const a1) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + struct fifo_UnixPipe_Writer__600 *t3; + struct fifo_UnixPipe_Reader__602 *t5; + uint16_t t4; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = (struct fifo_UnixPipe_Writer__600 *)&t2->writer; + t4 = fifo_UnixPipe_Writer_sendCmd__1064(t3, a1); + if (t4) { + return t4; + } + t2 = (*t1); + t5 = (struct fifo_UnixPipe_Reader__602 *)&t2->reader; + t4 = fifo_UnixPipe_Reader_waitForAck__1071(t5, 
(nav__741_67){UINT64_C(0xaaaaaaaaaaaaaaaa),true}); + if (t4) { + return t4; + } + return 0; +} + +static bool instruments_perf_PerfInstrument_is_instrumented__742(struct instruments_perf_PerfInstrument__559 *const a0) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + uint16_t t3; + bool t4; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = instruments_perf_PerfInstrument_send_cmd__741(t2, (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(4)}); + t4 = t3 == UINT16_C(0); + if (t4) { + goto zig_block_0; + } + return false; + + zig_block_0:; + return true; +} + +static nav__3810_39 mem_Allocator_allocBytesWithAlignment__anon_1979__3810(struct mem_Allocator__565 const a0, uintptr_t const a1, uintptr_t const a2) { + struct mem_Allocator__565 const *t1; + uint64_t t2; + uint8_t *t4; + uint8_t *t13; + uint8_t *t14; + uint8_t *t15; + struct mem_Allocator__565 t5; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t7; + struct mem_Allocator_VTable__568 const *const *t8; + struct mem_Allocator_VTable__568 const *t9; + uint8_t *(*const *t10)(void *, uintptr_t, uint8_t, uintptr_t); + uint8_t *(*t11)(void *, uintptr_t, uint8_t, uintptr_t); + void *t12; + uint8_t *const *t16; + nav__3810_52 t17; + nav__3810_39 t18; + bool t3; + uint8_t t6; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = a1; + t3 = t2 == UINT64_C(0); + if (t3) { + return (nav__3810_39){(uint8_t *)(uintptr_t)0xfffffffffffffff8ul,0}; + } + goto zig_block_0; + + zig_block_0:; + t5 = (*t1); + t6 = mem_Alignment_fromByteUnits__1032((uintptr_t)8ul); + t7 = t5; + t1 = (struct mem_Allocator__565 const *)&t7; + t8 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t9 = (*t8); + t10 = (uint8_t *(*const *)(void *, uintptr_t, 
uint8_t, uintptr_t))&t9->alloc; + t11 = (*t10); + t12 = t5.ptr; + t13 = t11(t12, a1, t6, a2); + t3 = t13 != NULL; + if (t3) { + t14 = t13; + t4 = t14; + goto zig_block_1; + } + return (nav__3810_39){((uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),zig_error_OutOfMemory}; + + zig_block_1:; + t15 = t4; + t16 = (uint8_t *const *)&t15; + t14 = (*t16); + t14 = (uint8_t *)(((uintptr_t)t14) + ((uintptr_t)0ul*sizeof(uint8_t))); + t17.ptr = t14; + t17.len = a1; + t14 = (uint8_t *)t4; + t18.payload = t14; + t18.error = UINT16_C(0); + return t18; +} + +static bool fs_path_isAbsolute__3845(nav__3845_39 const a0) { + bool t0; + t0 = fs_path_isAbsolutePosix__3851(a0); + return t0; +} + +static struct fs_Dir__1899 fs_cwd__1118(void) { + struct fs_Dir__1899 t0; + t0 = (struct fs_Dir__1899){-INT32_C(100)}; + return t0; +} + +static uint16_t fs_Dir_access__3766(struct fs_Dir__1899 const a0, nav__3766_40 const a1, struct fs_File_OpenFlags__1897 const a2) { + struct fs_Dir__1899 const *t1; + uint8_t const (*t6)[4096]; + uint8_t const *t8; + struct fs_Dir__1899 t7; + struct fs_Dir__1899 t0; + nav__3766_49 t2; + uint16_t t3; + uint8_t t4[4096]; + uint8_t t5[4096]; + t0 = a0; + t1 = (struct fs_Dir__1899 const *)&t0; + t2 = posix_toPosixPath__1701(a1); + if (t2.error) { + t3 = t2.error; + return t3; + } + memcpy(t4, t2.payload, sizeof(uint8_t[4096])); + memcpy((char *)&t5, t4, sizeof(uint8_t[4096])); + t6 = (uint8_t const (*)[4096])&t5; + t7 = (*t1); + t8 = (uint8_t const *)t6; + t3 = fs_Dir_accessZ__3767(t7, t8, a2); + return t3; +} + +static nav__3702_39 fs_Dir_openFile__3702(struct fs_Dir__1899 const a0, nav__3702_42 const a1, struct fs_File_OpenFlags__1897 const a2) { + struct fs_Dir__1899 const *t1; + uint8_t const (*t8)[4096]; + uint8_t const *t10; + nav__3702_39 t5; + struct fs_Dir__1899 t9; + struct fs_Dir__1899 t0; + nav__3702_53 t3; + uint16_t t4; + uint8_t t6[4096]; + uint8_t t7[4096]; + struct fs_File_OpenFlags__1897 t2; + t0 = a0; + t1 = (struct fs_Dir__1899 const *)&t0; + t2 = 
a2; + t3 = posix_toPosixPath__1701(a1); + if (t3.error) { + t4 = t3.error; + t5.payload = (struct fs_File__608){-INT32_C(0x55555556)}; + t5.error = t4; + return t5; + } + memcpy(t6, t3.payload, sizeof(uint8_t[4096])); + memcpy((char *)&t7, t6, sizeof(uint8_t[4096])); + t8 = (uint8_t const (*)[4096])&t7; + t9 = (*t1); + t10 = (uint8_t const *)t8; + t5 = fs_Dir_openFileZ__3703(t9, t10, a2); + return t5; +} + +static struct array_list_ArrayListAligned_28u8_2cnull_29__2040 array_list_ArrayListAligned_28u8_2cnull_29_init__3958(struct mem_Allocator__565 const a0) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t0; + t0.items = (nav__3958_42){(uint8_t *)((void const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),(uintptr_t)0ul}; + t0.capacity = (uintptr_t)0ul; + t0.allocator = a0; + return t0; +} + +static struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 array_list_ArrayListAligned_28u8_2cnull_29_writer__3983(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 **t1; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 **)&t0.context; + (*t1) = a0; + return t0; +} + +static uint16_t bincode_serialize__anon_2081__4131(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, struct shared_Command__2009 const a1) { + uint16_t t0; + t0 = bincode_serializeUnion__anon_2782__4309(a0, a1); + if (t0) { + return t0; + } + return 0; +} + +static void array_list_ArrayListAligned_28u8_2cnull_29_deinit__3960(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 const a0) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 const *t1; + struct mem_Allocator__565 
const *t2; + struct mem_Allocator__565 t3; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t4; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t0; + nav__3960_41 t5; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 const *)&t0; + t2 = (struct mem_Allocator__565 const *)&t1->allocator; + t3 = (*t2); + t4 = (*t1); + t5 = array_list_ArrayListAligned_28u8_2cnull_29_allocatedSlice__4006(t4); + mem_Allocator_free__anon_2631__4290(t3, t5); + return; +} + +static uint8_t const (*mem_asBytes__anon_2122__4132(uint32_t const *const a0))[4] { + uint8_t const (*t0)[4]; + t0 = (uint8_t const (*)[4])a0; + return t0; +} + +static uint16_t fs_File_writeAll__1217(struct fs_File__608 const a0, nav__1217_40 const a1) { + struct fs_File__608 const *t1; + nav__1217_40 const *t3; + uintptr_t t5; + uintptr_t t6; + uintptr_t t13; + uintptr_t t4; + uint64_t t7; + uint64_t t8; + nav__1217_40 t11; + nav__1217_40 t2; + uint8_t const *t12; + nav__1217_47 t14; + struct fs_File__608 t10; + struct fs_File__608 t0; + uint16_t t15; + bool t9; + t0 = a0; + t1 = (struct fs_File__608 const *)&t0; + t2 = a1; + t3 = (nav__1217_40 const *)&t2; + t4 = (uintptr_t)0ul; + zig_loop_11: + t5 = t4; + t6 = a1.len; + t7 = t5; + t8 = t6; + t9 = t7 < t8; + if (t9) { + t6 = t4; + t10 = (*t1); + t5 = t4; + t11 = (*t3); + t12 = t11.ptr; + t12 = (uint8_t const *)(((uintptr_t)t12) + (t5*sizeof(uint8_t))); + t13 = t11.len; + t5 = t13 - t5; + t11.ptr = t12; + t11.len = t5; + t14 = fs_File_write__1216(t10, t11); + if (t14.error) { + t15 = t14.error; + return t15; + } + t5 = t14.payload; + t5 = t6 + t5; + t4 = t5; + goto zig_block_1; + } + goto zig_block_0; + + zig_block_1:; + goto zig_loop_11; + + zig_block_0:; + return 0; +} + +static zig_i128 time_nanoTimestamp__4145(void) { + zig_i128 t6; + zig_i128 t7; + nav__4145_41 t0; + struct os_linux_timespec__struct_2818__2818 t2; + struct os_linux_timespec__struct_2818__2818 t3; + intptr_t t5; + uint16_t t4; + bool t1; + t0 = 
posix_clock_gettime__1617(UINT32_C(0)); + t1 = t0.error == UINT16_C(0); + if (t1) { + t3 = t0.payload; + t2 = t3; + goto zig_block_0; + } + t4 = t0.error; + switch (t4) { + case zig_error_UnsupportedClock: + case zig_error_Unexpected: { + return zig_make_i128(INT64_C(0), UINT64_C(0)); + } + default: zig_unreachable(); + } + + zig_block_0:; + t5 = t2.sec; + t6 = zig_make_i128(0, t5); + t6 = zig_mul_i128(t6, zig_make_i128(INT64_C(0), UINT64_C(1000000000))); + t5 = t2.nsec; + t7 = zig_make_i128(0, t5); + t7 = zig_add_i128(t6, t7); + return t7; +} + +static nav__1207_38 fs_File_readAll__1207(struct fs_File__608 const a0, nav__1207_41 const a1) { + struct fs_File__608 const *t1; + nav__1207_41 const *t3; + uintptr_t t5; + uintptr_t t6; + uintptr_t t4; + uint64_t t7; + uint64_t t8; + nav__1207_41 t11; + nav__1207_41 t2; + uint8_t *t12; + nav__1207_38 t13; + struct fs_File__608 t10; + struct fs_File__608 t0; + uint16_t t14; + bool t9; + t0 = a0; + t1 = (struct fs_File__608 const *)&t0; + t2 = a1; + t3 = (nav__1207_41 const *)&t2; + t4 = (uintptr_t)0ul; + zig_loop_11: + t5 = t4; + t6 = a1.len; + t7 = t5; + t8 = t6; + t9 = t7 != t8; + if (t9) { + t10 = (*t1); + t6 = t4; + t11 = (*t3); + t12 = t11.ptr; + t12 = (uint8_t *)(((uintptr_t)t12) + (t6*sizeof(uint8_t))); + t5 = t11.len; + t6 = t5 - t6; + t11.ptr = t12; + t11.len = t6; + t13 = fs_File_read__1206(t10, t11); + if (t13.error) { + t14 = t13.error; + t13.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t13.error = t14; + return t13; + } + t6 = t13.payload; + t8 = t6; + t9 = t8 == UINT64_C(0); + if (t9) { + goto zig_block_0; + } + goto zig_block_2; + + zig_block_2:; + t5 = t4; + t6 = t5 + t6; + t4 = t6; + goto zig_block_1; + } + goto zig_block_0; + + zig_block_1:; + goto zig_loop_11; + + zig_block_0:; + t6 = t4; + t13.payload = t6; + t13.error = UINT16_C(0); + return t13; +} + +static nav__4173_40 mem_Allocator_alloc__anon_2204__4173(struct mem_Allocator__565 const a0, uintptr_t const a1) { + struct mem_Allocator__565 const 
*t1; + struct mem_Allocator__565 t2; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t5; + uintptr_t t3; + nav__4173_40 t4; + nav__4173_40 t8; + nav__4173_51 t6; + uint8_t *t9; + uint8_t *t10; + uint8_t *const *t11; + nav__4173_39 t12; + uint16_t t7; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = (*t1); + t3 = (uintptr_t)zig_return_address(); + t5 = t2; + t1 = (struct mem_Allocator__565 const *)&t5; + t2 = (*t1); + t6 = mem_Allocator_allocWithSizeAndAlignment__anon_2849__4310(t2, a1, t3); + if (t6.error) { + t7 = t6.error; + t8.payload = (nav__4173_39){(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t8.error = t7; + t4 = t8; + goto zig_block_0; + } + t9 = t6.payload; + t10 = t9; + t11 = (uint8_t *const *)&t10; + t9 = (*t11); + t9 = (uint8_t *)(((uintptr_t)t9) + ((uintptr_t)0ul*sizeof(uint8_t))); + t12.ptr = t9; + t12.len = a1; + t8.payload = t12; + t8.error = UINT16_C(0); + t4 = t8; + goto zig_block_0; + + zig_block_0:; + return t4; +} + +static struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 io_fixed_buffer_stream_fixedBufferStream__anon_2226__4205(nav__4205_40 const a0) { + nav__4205_40 *t1; + uintptr_t *t2; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 t0; + t1 = (nav__4205_40 *)&t0.buffer; + (*t1) = a0; + t2 = (uintptr_t *)&t0.pos; + (*t2) = (uintptr_t)0ul; + return t0; +} + +static struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_reader__4194(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const a0) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 **t1; + struct io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29__2246 t0; + t1 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 
**)&t0.context; + (*t1) = a0; + return t0; +} + +static nav__4238_38 io_GenericReader_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7b_7d_2c_28function_20_27read_27_29_29_typeEras__4238(void const *const a0, nav__4238_41 const a1) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t0; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *t1; + nav__4238_38 t2; + nav__4238_38 t3; + t0 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)a0; + t1 = (*t0); + t2 = io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_read__4197(t1, a1); + memcpy(&t3, &t2, sizeof(nav__4238_38)); + return t3; +} + +static nav__4280_39 io_Reader_readBytesNoEof__anon_2403__4280(struct io_Reader__2372 const a0) { + struct io_Reader__2372 const *t1; + struct io_Reader__2372 t3; + struct io_Reader__2372 t0; + nav__4280_46 t4; + uint16_t t5; + nav__4280_39 t6; + uint8_t t7[4]; + uint8_t t2[4]; + t0 = a0; + t1 = (struct io_Reader__2372 const *)&t0; + t3 = (*t1); + t4.ptr = &t2[(uintptr_t)0ul]; + t4.len = (uintptr_t)4ul; + t5 = io_Reader_readNoEof__4247(t3, t4); + if (t5) { + memcpy(t6.payload, "\252\252\252\252", sizeof(uint8_t[4])); + t6.error = t5; + return t6; + } + memcpy(t7, (const char *)&t2, sizeof(uint8_t[4])); + memcpy(t6.payload, t7, sizeof(uint8_t[4])); + t6.error = UINT16_C(0); + return t6; +} + +static nav__4285_39 io_Reader_readBytesNoEof__anon_2496__4285(struct io_Reader__2372 const a0) { + struct io_Reader__2372 const *t1; + struct io_Reader__2372 t3; + struct io_Reader__2372 t0; + nav__4285_46 t4; + uint16_t t5; + nav__4285_39 t6; + uint8_t t7[8]; + uint8_t t2[8]; + t0 = a0; + t1 = (struct io_Reader__2372 const *)&t0; + t3 = (*t1); + t4.ptr = &t2[(uintptr_t)0ul]; + t4.len = (uintptr_t)8ul; + t5 = io_Reader_readNoEof__4247(t3, t4); + if (t5) { + memcpy(t6.payload, "\252\252\252\252\252\252\252\252", sizeof(uint8_t[8])); + t6.error = t5; + return t6; + } + memcpy(t7, (const char *)&t2, 
sizeof(uint8_t[8])); + memcpy(t6.payload, t7, sizeof(uint8_t[8])); + t6.error = UINT16_C(0); + return t6; +} + +static nav__4245_38 io_Reader_readAll__4245(struct io_Reader__2372 const a0, nav__4245_41 const a1) { + uintptr_t t0; + nav__4245_38 t1; + t0 = a1.len; + t1 = io_Reader_readAtLeast__4246(a0, a1, t0); + return t1; +} + +static zig_cold zig_noreturn void bincode_invalidProtocol__anon_2520__4286(void) { + debug_no_panic_call__4312((nav__4286_40){(uint8_t const *)&__anon_2869,(uintptr_t)88ul}, (nav__4286_42){(uintptr_t)0xaaaaaaaaaaaaaaaaul,true}); + zig_unreachable(); +} + +static void mem_Allocator_free__anon_2631__4290(struct mem_Allocator__565 const a0, nav__4290_40 const a1) { + struct mem_Allocator__565 const *t1; + nav__4290_40 t2; + uintptr_t t3; + uint64_t t4; + uint8_t *t6; + uint8_t *t7; + uint8_t *t8; + uint8_t *const *t9; + struct mem_Allocator__565 t10; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t12; + struct mem_Allocator_VTable__568 const *const *t13; + struct mem_Allocator_VTable__568 const *t14; + void (*const *t15)(void *, nav__4290_40, uint8_t, uintptr_t); + void (*t16)(void *, nav__4290_40, uint8_t, uintptr_t); + void *t17; + bool t5; + uint8_t t11; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = mem_sliceAsBytes__anon_2966__4339(a1); + t3 = t2.len; + t4 = t3; + t5 = t4 == UINT64_C(0); + if (t5) { + return; + } + goto zig_block_0; + + zig_block_0:; + t6 = t2.ptr; + t7 = (uint8_t *)t6; + t8 = t7; + t9 = (uint8_t *const *)&t8; + t7 = (*t9); + t7 = (uint8_t *)(((uintptr_t)t7) + ((uintptr_t)0ul*sizeof(uint8_t))); + t2.ptr = t7; + t2.len = t3; + t10 = (*t1); + t7 = (*t9); + t7 = (uint8_t *)(((uintptr_t)t7) + ((uintptr_t)0ul*sizeof(uint8_t))); + t2.ptr = t7; + t2.len = t3; + t11 = mem_Alignment_fromByteUnits__1032((uintptr_t)1ul); + t3 = (uintptr_t)zig_return_address(); + t12 = t10; + t1 = (struct mem_Allocator__565 const *)&t12; + t13 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t14 = 
(*t13); + t15 = (void (*const *)(void *, nav__4290_40, uint8_t, uintptr_t))&t14->free; + t16 = (*t15); + t17 = t10.ptr; + t16(t17, t2, t11, t3); + return; +} + +static void utils_sleep__4295(uint64_t const a0) { + uint64_t t0; + uint64_t t1; + long *t3; + long t4; + long t7; + nav__4295_42 t5; + struct cimport_struct_timespec__2992 const *t9; + struct cimport_struct_timespec__2992 *t10; + struct cimport_struct_timespec__2992 t13; + struct cimport_struct_timespec__2992 t2; + struct cimport_struct_timespec__2992 t8; + int t11; + int32_t t12; + bool t6; + t0 = a0 / UINT64_C(1000000000); + t1 = a0 % UINT64_C(1000000000); + t3 = (long *)&t2.tv_sec; + t5 = math_cast__anon_3005__5194(t0); + t6 = t5.is_null != true; + if (t6) { + t7 = t5.payload; + t4 = t7; + goto zig_block_0; + } + t4 = LONG_MAX; + goto zig_block_0; + + zig_block_0:; + (*t3) = t4; + t3 = (long *)&t2.tv_nsec; + t5 = math_cast__anon_3005__5194(t1); + t6 = t5.is_null != true; + if (t6) { + t7 = t5.payload; + t4 = t7; + goto zig_block_1; + } + t4 = LONG_MAX; + goto zig_block_1; + + zig_block_1:; + (*t3) = t4; + zig_loop_28: + t9 = (struct cimport_struct_timespec__2992 const *)&t2; + t10 = (struct cimport_struct_timespec__2992 *)&t8; + t11 = nanosleep(t9, t10); + t12 = t11; + t6 = t12 == INT32_C(4); + if (t6) { + t13 = t8; + t2 = t13; + goto zig_block_2; + } + return; + + zig_block_2:; + goto zig_loop_28; +} + +static void shared_Command_deinit__3908(struct shared_Command__2009 const a0, struct mem_Allocator__565 const a1) { + struct mem_Allocator__565 const *t1; + struct shared_Command__struct_2013__2013 t3; + struct mem_Allocator__565 t4; + struct mem_Allocator__565 t0; + nav__3908_43 t5; + struct shared_Command__struct_2012__2012 t6; + uint8_t t2; + t0 = a1; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = a0.tag; + switch (t2) { + case UINT8_C(5): { + t3 = a0.payload.SetIntegration; + t4 = (*t1); + t5 = t3.name; + mem_Allocator_free__anon_3072__5974(t4, t5); + t4 = (*t1); + t5 = t3.version; + 
mem_Allocator_free__anon_3072__5974(t4, t5); + goto zig_block_0; + } + case UINT8_C(0): { + t6 = a0.payload.ExecutedBenchmark; + t4 = (*t1); + t5 = t6.uri; + mem_Allocator_free__anon_3072__5974(t4, t5); + goto zig_block_0; + } + default: { + goto zig_block_0; + } + } + + zig_block_0:; + return; +} + +static uint8_t mem_Alignment_fromByteUnits__1032(uintptr_t const a0) { + bool t0; + uint8_t t1; + t0 = math_isPowerOfTwo__anon_3074__5975(a0); + debug_assert__180(t0); + t1 = zig_ctz_u64(a0, UINT8_C(64)); + return t1; +} + +static bool fs_path_isAbsolutePosix__3851(nav__3851_39 const a0) { + uintptr_t t0; + uint64_t t1; + bool t2; + bool t3; + uint8_t t4; + t0 = a0.len; + t1 = t0; + t2 = t1 > UINT64_C(0); + if (t2) { + t4 = a0.ptr[(uintptr_t)0ul]; + t2 = t4 == UINT8_C(47); + t3 = t2; + goto zig_block_0; + } + t3 = false; + goto zig_block_0; + + zig_block_0:; + return t3; +} + +static nav__1701_39 posix_toPosixPath__1701(nav__1701_41 const a0) { + uintptr_t t1; + uint64_t t2; + uint8_t *t4; + nav__1701_47 t5; + uint8_t const *t6; + nav__1701_39 t8; + bool t3; + uint8_t t7[4096]; + uint8_t t0[4096]; + t1 = a0.len; + t2 = t1; + t3 = t2 >= UINT64_C(4096); + if (t3) { + return 
(nav__1701_39){zig_error_NameTooLong,{'\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\
xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa',
'\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa
','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\x
aa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','
\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa'
,'\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xa
a','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\
xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa',
'\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa
','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\x
aa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','
\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa'
,'\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xa
a','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa','\xaa'}}; + } + goto zig_block_0; + + zig_block_0:; + t1 = a0.len; + t4 = (uint8_t *)&t0; + t4 = (uint8_t *)(((uintptr_t)t4) + ((uintptr_t)0ul*sizeof(uint8_t))); + t5.ptr = t4; + t5.len = t1; + t6 = a0.ptr; + if (t5.len != 0) memcpy(t5.ptr, t6, t5.len * sizeof(uint8_t)); + t1 = a0.len; + t4 = (uint8_t *)&t0[t1]; + (*t4) = UINT8_C(0); + memcpy(t7, (const char *)&t0, sizeof(uint8_t[4096])); + memcpy(t8.payload, t7, sizeof(uint8_t[4096])); + t8.error = UINT16_C(0); + return t8; +} + +static uint16_t fs_Dir_accessZ__3767(struct fs_Dir__1899 const a0, uint8_t const *const a1, struct fs_File_OpenFlags__1897 const a2) { + uint32_t t2; + int32_t t3; + struct fs_Dir__1899 t0; + uint16_t t4; + uint8_t t1; + t0 = a0; + t1 = a2.mode; + switch (t1) { + case UINT8_C(0): { + t2 = UINT32_C(0); + goto zig_block_0; + } + case UINT8_C(1): { + t2 = UINT32_C(2); + goto zig_block_0; + } + case UINT8_C(2): { + t2 = UINT32_C(6); + goto zig_block_0; + } + default: zig_unreachable(); + } + + zig_block_0:; + t3 = a0.fd; + t4 = posix_faccessatZ__1592(t3, a1, t2, UINT32_C(0)); + return t4; +} + +static nav__3703_39 fs_Dir_openFileZ__3703(struct fs_Dir__1899 const a0, uint8_t const *const a1, struct fs_File_OpenFlags__1897 const a2) { + uint32_t *t2; + struct fs_File__608 *t14; + int32_t *t15; + int32_t 
t6; + int32_t t11; + int32_t t12; + uint32_t t7; + uint32_t t1; + nav__3703_50 t8; + nav__3703_39 t10; + nav__3703_39 t13; + struct fs_Dir__1899 t0; + uint16_t t9; + uint8_t t3; + uint8_t t4; + bool t5; + t0 = a0; + t2 = (uint32_t *)&t1; + t3 = a2.mode; + switch (t3) { + case UINT8_C(0): { + t4 = UINT8_C(0); + goto zig_block_0; + } + case UINT8_C(1): { + t4 = UINT8_C(1); + goto zig_block_0; + } + case UINT8_C(2): { + t4 = UINT8_C(2); + goto zig_block_0; + } + default: zig_unreachable(); + } + + zig_block_0:; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffffffc)), zig_shl_u32((uint32_t)t4, UINT8_C(0))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffffffc3)), zig_shl_u32((uint32_t)UINT8_C(0), UINT8_C(2))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffffffbf)), zig_shl_u32((uint32_t)false, UINT8_C(6))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffffff7f)), zig_shl_u32((uint32_t)false, UINT8_C(7))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffffeff)), zig_shl_u32((uint32_t)false, UINT8_C(8))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffffdff)), zig_shl_u32((uint32_t)false, UINT8_C(9))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffffbff)), zig_shl_u32((uint32_t)false, UINT8_C(10))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffff7ff)), zig_shl_u32((uint32_t)false, UINT8_C(11))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffffefff)), zig_shl_u32((uint32_t)false, UINT8_C(12))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffffdfff)), zig_shl_u32((uint32_t)false, UINT8_C(13))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffffbfff)), zig_shl_u32((uint32_t)false, UINT8_C(14))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), 
UINT32_C(0xffff7fff)), zig_shl_u32((uint32_t)UINT8_C(0), UINT8_C(15))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffeffff)), zig_shl_u32((uint32_t)false, UINT8_C(16))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffdffff)), zig_shl_u32((uint32_t)false, UINT8_C(17))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffbffff)), zig_shl_u32((uint32_t)false, UINT8_C(18))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfff7ffff)), zig_shl_u32((uint32_t)false, UINT8_C(19))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffefffff)), zig_shl_u32((uint32_t)false, UINT8_C(20))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffdfffff)), zig_shl_u32((uint32_t)false, UINT8_C(21))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xffbfffff)), zig_shl_u32((uint32_t)false, UINT8_C(22))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0x7fffff)), zig_shl_u32((uint32_t)UINT16_C(0), UINT8_C(23))); + t2 = (uint32_t *)&t1; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfff7ffff)), zig_shl_u32((uint32_t)true, UINT8_C(19))); + t2 = (uint32_t *)&t1; + t5 = a2.allow_ctty; + t5 = !t5; + (*t2) = zig_or_u32(zig_and_u32((*t2), UINT32_C(0xfffffeff)), zig_shl_u32((uint32_t)t5, UINT8_C(8))); + t6 = a0.fd; + t7 = t1; + t8 = posix_openatZ__1448(t6, a1, t7, (uintptr_t)0ul); + if (t8.error) { + t9 = t8.error; + t10.payload = (struct fs_File__608){-INT32_C(0x55555556)}; + t10.error = t9; + return t10; + } + t6 = t8.payload; + t4 = a2.lock; + t5 = t4 != UINT8_C(0); + if (t5) { + t5 = a2.lock_nonblocking; + if (t5) { + t11 = INT32_C(4); + goto zig_block_2; + } + t11 = INT32_C(0); + goto zig_block_2; + + zig_block_2:; + t4 = a2.lock; + switch (t4) { + case UINT8_C(0): { + zig_unreachable(); + } + case UINT8_C(1): { + t11 = INT32_C(1) | t11; + t12 = t11; + goto zig_block_3; 
+ } + case UINT8_C(2): { + t11 = INT32_C(2) | t11; + t12 = t11; + goto zig_block_3; + } + default: zig_unreachable(); + } + + zig_block_3:; + t9 = posix_flock__1609(t6, t12); + if (t9) { + posix_close__1406(t6); + t10.payload = (struct fs_File__608){-INT32_C(0x55555556)}; + t10.error = t9; + return t10; + } + goto zig_block_1; + } + goto zig_block_1; + + zig_block_1:; + t13.error = UINT16_C(0); + t14 = &t13.payload; + t15 = (int32_t *)&t14->handle; + (*t15) = t6; + return t13; +} + +static uint16_t bincode_serializeUnion__anon_2782__4309(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, struct shared_Command__2009 const a1) { + struct shared_Command__struct_2012__2012 t4; + struct shared_Command__struct_2013__2013 t5; + uint32_t t1; + uint16_t t2; + uint8_t t0; + bool t3; + t0 = a1.tag; + t1 = (uint32_t)t0; + t2 = bincode_serialize__anon_3213__6100(a0, t1); + if (t2) { + return t2; + } + t0 = a1.tag; + t3 = t0 == UINT8_C(0); + if (t3) { + t4 = a1.payload.ExecutedBenchmark; + t2 = bincode_serialize__anon_3215__6101(a0, t4); + if (t2) { + return t2; + } + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + t0 = a1.tag; + t3 = t0 == UINT8_C(1); + if (t3) { + t2 = bincode_serialize__anon_3217__6102(a0); + if (t2) { + return t2; + } + goto zig_block_1; + } + goto zig_block_1; + + zig_block_1:; + t0 = a1.tag; + t3 = t0 == UINT8_C(2); + if (t3) { + t2 = bincode_serialize__anon_3217__6102(a0); + if (t2) { + return t2; + } + goto zig_block_2; + } + goto zig_block_2; + + zig_block_2:; + t0 = a1.tag; + t3 = t0 == UINT8_C(3); + if (t3) { + t2 = bincode_serialize__anon_3217__6102(a0); + if (t2) { + return t2; + } + goto zig_block_3; + } + goto zig_block_3; + + zig_block_3:; + t0 = a1.tag; + t3 = t0 == UINT8_C(4); + if (t3) { + t2 = bincode_serialize__anon_3217__6102(a0); + if (t2) { + return t2; + } + goto zig_block_4; + } + goto zig_block_4; + + zig_block_4:; + t0 = 
a1.tag; + t3 = t0 == UINT8_C(5); + if (t3) { + t5 = a1.payload.SetIntegration; + t2 = bincode_serialize__anon_3219__6103(a0, t5); + if (t2) { + return t2; + } + goto zig_block_5; + } + goto zig_block_5; + + zig_block_5:; + t0 = a1.tag; + t3 = t0 == UINT8_C(6); + if (t3) { + t2 = bincode_serialize__anon_3217__6102(a0); + if (t2) { + return t2; + } + goto zig_block_6; + } + goto zig_block_6; + + zig_block_6:; + return 0; +} + +static nav__4006_39 array_list_ArrayListAligned_28u8_2cnull_29_allocatedSlice__4006(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 const a0) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 const *t1; + nav__4006_39 const *t2; + uint8_t *const *t3; + uintptr_t t4; + uint8_t *t5; + nav__4006_39 t6; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t0; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 const *)&t0; + t2 = (nav__4006_39 const *)&t1->items; + t3 = &t2->ptr; + t4 = a0.capacity; + t5 = (*t3); + t5 = (uint8_t *)(((uintptr_t)t5) + ((uintptr_t)0ul*sizeof(uint8_t))); + t6.ptr = t5; + t6.len = t4; + return t6; +} + +static nav__1216_38 fs_File_write__1216(struct fs_File__608 const a0, nav__1216_41 const a1) { + nav__1216_38 t1; + int32_t t0; + t0 = a0.handle; + t1 = posix_write__1436(t0, a1); + return t1; +} + +static nav__1617_39 posix_clock_gettime__1617(uint32_t const a0) { + struct os_linux_timespec__struct_2818__2818 t3; + struct os_linux_timespec__struct_2818__2818 t0; + nav__1617_39 t4; + int t1; + uint16_t t2; + t1 = clock_gettime(a0, &t0); + t2 = posix_errno__anon_3383__6106(t1); + switch (t2) { + case UINT16_C(0): { + t3 = t0; + t4.payload = t3; + t4.error = UINT16_C(0); + return t4; + } + case UINT16_C(14): { + zig_unreachable(); + } + case UINT16_C(22): { + return (nav__1617_39){{-(intptr_t)0x5555555555555556,-(intptr_t)0x5555555555555556},zig_error_UnsupportedClock}; + } + default: { + t2 = posix_unexpectedErrno__1700(t2); + t4.payload = (struct 
os_linux_timespec__struct_2818__2818){-(intptr_t)0x5555555555555556,-(intptr_t)0x5555555555555556}; + t4.error = t2; + return t4; + } + } +} + +static nav__1206_38 fs_File_read__1206(struct fs_File__608 const a0, nav__1206_41 const a1) { + nav__1206_38 t1; + int32_t t0; + t0 = a0.handle; + t1 = posix_read__1428(t0, a1); + return t1; +} + +static nav__4310_39 mem_Allocator_allocWithSizeAndAlignment__anon_2849__4310(struct mem_Allocator__565 const a0, uintptr_t const a1, uintptr_t const a2) { + struct mem_Allocator__565 const *t1; + uintptr_t t2; + uintptr_t t5; + nav__4310_49 t3; + struct mem_Allocator__565 t6; + struct mem_Allocator__565 t0; + nav__4310_39 t7; + bool t4; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t3 = math_mul__anon_3404__6107((uintptr_t)1ul, a1); + t4 = t3.error == UINT16_C(0); + if (t4) { + t5 = t3.payload; + t2 = t5; + goto zig_block_0; + } + return (nav__4310_39){((uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),zig_error_OutOfMemory}; + + zig_block_0:; + t6 = (*t1); + t7 = mem_Allocator_allocBytesWithAlignment__anon_3406__6108(t6, t2, a2); + return t7; +} + +static nav__4197_38 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_read__4197(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const a0, nav__4197_42 const a1) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t1; + nav__4197_42 const *t3; + uintptr_t t4; + uintptr_t t7; + uintptr_t t9; + nav__4197_42 *t5; + nav__4197_42 t6; + nav__4197_42 t13; + nav__4197_42 t2; + uintptr_t *t8; + uint64_t t10; + uint8_t *t11; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *t12; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *t0; + nav__4197_38 t14; + t0 = a0; + t1 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)&t0; + t2 = a1; + t3 = (nav__4197_42 const *)&t2; + t4 = a1.len; + t5 = (nav__4197_42 *)&a0->buffer; + t6 = (*t5); + t7 = t6.len; + t8 = (uintptr_t 
*)&a0->pos; + t9 = (*t8); + t9 = t7 - t9; + t9 = (t4 < t9) ? t4 : t9; + t10 = t9; + t8 = (uintptr_t *)&a0->pos; + t9 = (*t8); + t4 = t10; + t4 = t9 + t4; + t6 = (*t3); + t11 = t6.ptr; + t11 = (uint8_t *)(((uintptr_t)t11) + ((uintptr_t)0ul*sizeof(uint8_t))); + t9 = t10; + t6.ptr = t11; + t6.len = t9; + t12 = (*t1); + t5 = (nav__4197_42 *)&t12->buffer; + t8 = (uintptr_t *)&a0->pos; + t9 = (*t8); + t13 = (*t5); + t11 = t13.ptr; + t11 = (uint8_t *)(((uintptr_t)t11) + (t9*sizeof(uint8_t))); + t9 = t4 - t9; + t13.ptr = t11; + t13.len = t9; + t11 = t13.ptr; + if (t6.len != 0) memcpy(t6.ptr, t11, t6.len * sizeof(uint8_t)); + t12 = (*t1); + t8 = (uintptr_t *)&t12->pos; + (*t8) = t4; + t4 = t10; + t14.payload = t4; + t14.error = UINT16_C(0); + return t14; +} + +static uint16_t io_Reader_readNoEof__4247(struct io_Reader__2372 const a0, nav__4247_40 const a1) { + struct io_Reader__2372 const *t1; + struct io_Reader__2372 t2; + struct io_Reader__2372 t0; + nav__4247_43 t3; + uintptr_t t5; + uintptr_t t6; + uint64_t t7; + uint64_t t8; + uint16_t t4; + bool t9; + t0 = a0; + t1 = (struct io_Reader__2372 const *)&t0; + t2 = (*t1); + t3 = io_Reader_readAll__4245(t2, a1); + if (t3.error) { + t4 = t3.error; + return t4; + } + t5 = t3.payload; + t6 = a1.len; + t7 = t5; + t8 = t6; + t9 = t7 < t8; + if (t9) { + return zig_error_EndOfStream; + } + goto zig_block_0; + + zig_block_0:; + return 0; +} + +static nav__4246_38 io_Reader_readAtLeast__4246(struct io_Reader__2372 const a0, nav__4246_41 const a1, uintptr_t const a2) { + struct io_Reader__2372 const *t1; + nav__4246_41 const *t3; + uintptr_t t4; + uintptr_t t12; + uintptr_t t8; + uint64_t t5; + uint64_t t6; + struct io_Reader__2372 t9; + struct io_Reader__2372 t0; + nav__4246_41 t10; + nav__4246_41 t2; + uint8_t *t11; + nav__4246_38 t13; + uint16_t t14; + bool t7; + t0 = a0; + t1 = (struct io_Reader__2372 const *)&t0; + t2 = a1; + t3 = (nav__4246_41 const *)&t2; + t4 = a1.len; + t5 = a2; + t6 = t4; + t7 = t5 <= t6; + 
debug_assert__180(t7); + t8 = (uintptr_t)0ul; + zig_loop_17: + t4 = t8; + t6 = t4; + t5 = a2; + t7 = t6 < t5; + if (t7) { + t9 = (*t1); + t4 = t8; + t10 = (*t3); + t11 = t10.ptr; + t11 = (uint8_t *)(((uintptr_t)t11) + (t4*sizeof(uint8_t))); + t12 = t10.len; + t4 = t12 - t4; + t10.ptr = t11; + t10.len = t4; + t13 = io_Reader_read__4244(t9, t10); + if (t13.error) { + t14 = t13.error; + t13.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t13.error = t14; + return t13; + } + t4 = t13.payload; + t5 = t4; + t7 = t5 == UINT64_C(0); + if (t7) { + goto zig_block_0; + } + goto zig_block_2; + + zig_block_2:; + t12 = t8; + t4 = t12 + t4; + t8 = t4; + goto zig_block_1; + } + goto zig_block_0; + + zig_block_1:; + goto zig_loop_17; + + zig_block_0:; + t12 = t8; + t13.payload = t12; + t13.error = UINT16_C(0); + return t13; +} + +static zig_cold zig_noreturn void debug_no_panic_call__4312(nav__4312_39 const a0, nav__4312_40 const a1) { + (void)a0; + (void)a1; + zig_trap(); +} + +static nav__4339_39 mem_sliceAsBytes__anon_2966__4339(nav__4339_39 const a0) { + uintptr_t t0; + uint64_t t1; + uint8_t *t4; + uint8_t *t5; + uint8_t *const *t6; + nav__4339_39 t7; + bool t2; + bool t3; + t0 = a0.len; + t1 = t0; + t2 = t1 == UINT64_C(0); + if (t2) { + t3 = true; + goto zig_block_1; + } + t3 = false; + goto zig_block_1; + + zig_block_1:; + if (t3) { + return (nav__4339_39){(uint8_t *)((void const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),(uintptr_t)0ul}; + } + goto zig_block_0; + + zig_block_0:; + t4 = a0.ptr; + t5 = t4; + t6 = (uint8_t *const *)&t5; + t0 = a0.len; + t4 = (*t6); + t4 = (uint8_t *)(((uintptr_t)t4) + ((uintptr_t)0ul*sizeof(uint8_t))); + t7.ptr = t4; + t7.len = t0; + return t7; +} + +static nav__5194_38 math_cast__anon_3005__5194(uint64_t const a0) { + long t1; + nav__5194_38 t2; + bool t0; + t0 = a0 > UINT64_C(9223372036854775807); + if (t0) { + return (nav__5194_38){-0x5555555555555556l,true}; + } + t1 = (long)a0; + t2.is_null = false; + t2.payload = t1; + return t2; +} + +static void 
mem_Allocator_free__anon_3072__5974(struct mem_Allocator__565 const a0, nav__5974_40 const a1) { + struct mem_Allocator__565 const *t1; + nav__5974_40 t2; + uintptr_t t3; + uint64_t t4; + uint8_t const *t6; + uint8_t *t7; + uint8_t *t8; + uint8_t *const *t9; + nav__5974_51 t10; + struct mem_Allocator__565 t11; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t13; + struct mem_Allocator_VTable__568 const *const *t14; + struct mem_Allocator_VTable__568 const *t15; + void (*const *t16)(void *, nav__5974_51, uint8_t, uintptr_t); + void (*t17)(void *, nav__5974_51, uint8_t, uintptr_t); + void *t18; + bool t5; + uint8_t t12; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = mem_sliceAsBytes__anon_3437__6109(a1); + t3 = t2.len; + t4 = t3; + t5 = t4 == UINT64_C(0); + if (t5) { + return; + } + goto zig_block_0; + + zig_block_0:; + t6 = t2.ptr; + t7 = (uint8_t *)t6; + t8 = t7; + t9 = (uint8_t *const *)&t8; + t7 = (*t9); + t7 = (uint8_t *)(((uintptr_t)t7) + ((uintptr_t)0ul*sizeof(uint8_t))); + t10.ptr = t7; + t10.len = t3; + t11 = (*t1); + t7 = (*t9); + t7 = (uint8_t *)(((uintptr_t)t7) + ((uintptr_t)0ul*sizeof(uint8_t))); + t10.ptr = t7; + t10.len = t3; + t12 = mem_Alignment_fromByteUnits__1032((uintptr_t)1ul); + t3 = (uintptr_t)zig_return_address(); + t13 = t11; + t1 = (struct mem_Allocator__565 const *)&t13; + t14 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t15 = (*t14); + t16 = (void (*const *)(void *, nav__5974_51, uint8_t, uintptr_t))&t15->free; + t17 = (*t16); + t18 = t11.ptr; + t17(t18, t10, t12, t3); + return; +} + +static bool math_isPowerOfTwo__anon_3074__5975(uintptr_t const a0) { + uint64_t t0; + uintptr_t t2; + bool t1; + t0 = a0; + t1 = t0 > UINT64_C(0); + debug_assert__180(t1); + t2 = a0 - (uintptr_t)1ul; + t2 = a0 & t2; + t0 = t2; + t1 = t0 == UINT64_C(0); + return t1; +} + +static uint16_t posix_faccessatZ__1592(int32_t const a0, uint8_t const *const a1, uint32_t const a2, uint32_t const a3) { + unsigned int t0; 
+ unsigned int t1; + int t2; + uint16_t t3; + t0 = a2; + t1 = a3; + t2 = faccessat(a0, a1, t0, t1); + t3 = posix_errno__anon_3383__6106(t2); + switch (t3) { + case UINT16_C(0): { + return 0; + } + case UINT16_C(13): { + return zig_error_PermissionDenied; + } + case UINT16_C(1): { + return zig_error_PermissionDenied; + } + case UINT16_C(30): { + return zig_error_ReadOnlyFileSystem; + } + case UINT16_C(40): { + return zig_error_SymLinkLoop; + } + case UINT16_C(26): { + return zig_error_FileBusy; + } + case UINT16_C(20): { + return zig_error_FileNotFound; + } + case UINT16_C(2): { + return zig_error_FileNotFound; + } + case UINT16_C(36): { + return zig_error_NameTooLong; + } + case UINT16_C(22): { + zig_unreachable(); + } + case UINT16_C(14): { + zig_unreachable(); + } + case UINT16_C(5): { + return zig_error_InputOutput; + } + case UINT16_C(12): { + return zig_error_SystemResources; + } + case UINT16_C(84): { + t3 = posix_unexpectedErrno__1700(t3); + return t3; + } + default: { + t3 = posix_unexpectedErrno__1700(t3); + return t3; + } + } +} + +static nav__1448_38 posix_openatZ__1448(int32_t const a0, uint8_t const *const a1, uint32_t const a2, uintptr_t const a3) { + unsigned long t1; + int t0; + int32_t t3; + nav__1448_38 t4; + uint16_t t2; + zig_loop_11: + t0 = a0; + t1 = a3; + t0 = openat64(t0, a1, a2, t1); + t2 = posix_errno__anon_3383__6106(t0); + switch (t2) { + case UINT16_C(0): { + t3 = t0; + t4.payload = t3; + t4.error = UINT16_C(0); + return t4; + } + case UINT16_C(4): { + goto zig_block_0; + } + case UINT16_C(14): { + zig_unreachable(); + } + case UINT16_C(22): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_BadPathName}; + } + case UINT16_C(9): { + zig_unreachable(); + } + case UINT16_C(13): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_AccessDenied}; + } + case UINT16_C(27): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_FileTooBig}; + } + case UINT16_C(75): { + return 
(nav__1448_38){-INT32_C(0x55555556),zig_error_FileTooBig}; + } + case UINT16_C(21): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_IsDir}; + } + case UINT16_C(40): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_SymLinkLoop}; + } + case UINT16_C(24): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_ProcessFdQuotaExceeded}; + } + case UINT16_C(36): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_NameTooLong}; + } + case UINT16_C(23): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_SystemFdQuotaExceeded}; + } + case UINT16_C(19): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_NoDevice}; + } + case UINT16_C(2): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_FileNotFound}; + } + case UINT16_C(12): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_SystemResources}; + } + case UINT16_C(28): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_NoSpaceLeft}; + } + case UINT16_C(20): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_NotDir}; + } + case UINT16_C(1): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_AccessDenied}; + } + case UINT16_C(17): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_PathAlreadyExists}; + } + case UINT16_C(16): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_DeviceBusy}; + } + case UINT16_C(95): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_FileLocksNotSupported}; + } + case UINT16_C(11): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_WouldBlock}; + } + case UINT16_C(26): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_FileBusy}; + } + case UINT16_C(6): { + return (nav__1448_38){-INT32_C(0x55555556),zig_error_NoDevice}; + } + case UINT16_C(84): { + t2 = posix_unexpectedErrno__1700(t2); + t4.payload = -INT32_C(0x55555556); + t4.error = t2; + return t4; + } + default: { + t2 = posix_unexpectedErrno__1700(t2); + t4.payload = -INT32_C(0x55555556); + t4.error = t2; + return t4; + } + } + + zig_block_0:; + goto 
zig_loop_11; +} + +static uint16_t posix_flock__1609(int32_t const a0, int32_t const a1) { + int t0; + uint16_t t1; + zig_loop_3: + t0 = a1; + t0 = flock(a0, t0); + t1 = posix_errno__anon_3383__6106(t0); + switch (t1) { + case UINT16_C(0): { + return 0; + } + case UINT16_C(9): { + zig_unreachable(); + } + case UINT16_C(4): { + goto zig_block_0; + } + case UINT16_C(22): { + zig_unreachable(); + } + case UINT16_C(37): { + return zig_error_SystemResources; + } + case UINT16_C(11): { + return zig_error_WouldBlock; + } + case UINT16_C(95): { + return zig_error_FileLocksNotSupported; + } + default: { + t1 = posix_unexpectedErrno__1700(t1); + return t1; + } + } + + zig_block_0:; + goto zig_loop_3; +} + +static void posix_close__1406(int32_t const a0) { + int t0; + uint16_t t1; + t0 = close(a0); + t1 = posix_errno__anon_3383__6106(t0); + switch (t1) { + case UINT16_C(9): { + zig_unreachable(); + } + case UINT16_C(4): { + return; + } + default: { + return; + } + } +} + +static uint16_t bincode_serialize__anon_3213__6100(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, uint32_t const a1) { + uint16_t t0; + t0 = bincode_serializeInt__anon_3599__6113(a0, a1); + if (t0) { + return t0; + } + return 0; +} + +static uint16_t bincode_serialize__anon_3215__6101(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, struct shared_Command__struct_2012__2012 const a1) { + uint16_t t0; + t0 = bincode_serializeStruct__anon_3604__6114(a0, a1); + if (t0) { + return t0; + } + return 0; +} + +static uint16_t bincode_serialize__anon_3217__6102(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0) { + (void)a0; + return 0; +} + +static uint16_t bincode_serialize__anon_3219__6103(struct 
io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, struct shared_Command__struct_2013__2013 const a1) { + uint16_t t0; + t0 = bincode_serializeStruct__anon_3605__6115(a0, a1); + if (t0) { + return t0; + } + return 0; +} + +static nav__1436_38 posix_write__1436(int32_t const a0, nav__1436_40 const a1) { + uintptr_t t0; + uint64_t t1; + uint8_t const *t3; + intptr_t t5; + nav__1436_38 t7; + uint32_t t4; + uint16_t t6; + bool t2; + t0 = a1.len; + t1 = t0; + t2 = t1 == UINT64_C(0); + if (t2) { + return (nav__1436_38){(uintptr_t)0ul,0}; + } + goto zig_block_0; + + zig_block_0:; + zig_loop_16: + t3 = a1.ptr; + t0 = a1.len; + t0 = ((uintptr_t)2147479552ul < t0) ? (uintptr_t)2147479552ul : t0; + t4 = (uint32_t)t0; + t0 = (uintptr_t)t4; + t5 = write(a0, t3, t0); + t6 = posix_errno__anon_3616__6117(t5); + switch (t6) { + case UINT16_C(0): { + t0 = (uintptr_t)t5; + t7.payload = t0; + t7.error = UINT16_C(0); + return t7; + } + case UINT16_C(4): { + goto zig_block_1; + } + case UINT16_C(22): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_InvalidArgument}; + } + case UINT16_C(14): { + zig_unreachable(); + } + case UINT16_C(2): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_ProcessNotFound}; + } + case UINT16_C(11): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_WouldBlock}; + } + case UINT16_C(9): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_NotOpenForWriting}; + } + case UINT16_C(89): { + zig_unreachable(); + } + case UINT16_C(122): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_DiskQuota}; + } + case UINT16_C(27): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_FileTooBig}; + } + case UINT16_C(5): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_InputOutput}; + } + case UINT16_C(28): { + return 
(nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_NoSpaceLeft}; + } + case UINT16_C(13): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_AccessDenied}; + } + case UINT16_C(1): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_AccessDenied}; + } + case UINT16_C(32): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_BrokenPipe}; + } + case UINT16_C(104): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_ConnectionResetByPeer}; + } + case UINT16_C(16): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_DeviceBusy}; + } + case UINT16_C(6): { + return (nav__1436_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_NoDevice}; + } + default: { + t6 = posix_unexpectedErrno__1700(t6); + t7.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t7.error = t6; + return t7; + } + } + + zig_block_1:; + goto zig_loop_16; +} + +static uint16_t posix_errno__anon_3383__6106(int const a0) { + int *t3; + int32_t t1; + int t4; + uint16_t t0; + uint16_t t5; + bool t2; + t1 = a0; + t2 = t1 == -INT32_C(1); + if (t2) { + t3 = zig_e___errno_location(); + t4 = (*t3); + t5 = (uint16_t)t4; + t0 = t5; + goto zig_block_0; + } + t0 = UINT16_C(0); + goto zig_block_0; + + zig_block_0:; + return t0; +} + +static uint16_t posix_unexpectedErrno__1700(uint16_t const a0) { + (void)a0; + return zig_error_Unexpected; +} + +static nav__1428_38 posix_read__1428(int32_t const a0, nav__1428_40 const a1) { + uintptr_t t0; + uint64_t t1; + uint8_t *t3; + intptr_t t5; + nav__1428_38 t7; + uint32_t t4; + uint16_t t6; + bool t2; + t0 = a1.len; + t1 = t0; + t2 = t1 == UINT64_C(0); + if (t2) { + return (nav__1428_38){(uintptr_t)0ul,0}; + } + goto zig_block_0; + + zig_block_0:; + zig_loop_16: + t3 = a1.ptr; + t0 = a1.len; + t0 = ((uintptr_t)2147479552ul < t0) ? 
(uintptr_t)2147479552ul : t0; + t4 = (uint32_t)t0; + t0 = (uintptr_t)t4; + t5 = read(a0, t3, t0); + t6 = posix_errno__anon_3616__6117(t5); + switch (t6) { + case UINT16_C(0): { + t0 = (uintptr_t)t5; + t7.payload = t0; + t7.error = UINT16_C(0); + return t7; + } + case UINT16_C(4): { + goto zig_block_1; + } + case UINT16_C(22): { + zig_unreachable(); + } + case UINT16_C(14): { + zig_unreachable(); + } + case UINT16_C(2): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_ProcessNotFound}; + } + case UINT16_C(11): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_WouldBlock}; + } + case UINT16_C(125): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_Canceled}; + } + case UINT16_C(9): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_NotOpenForReading}; + } + case UINT16_C(5): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_InputOutput}; + } + case UINT16_C(21): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_IsDir}; + } + case UINT16_C(105): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_SystemResources}; + } + case UINT16_C(12): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_SystemResources}; + } + case UINT16_C(107): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_SocketNotConnected}; + } + case UINT16_C(104): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_ConnectionResetByPeer}; + } + case UINT16_C(110): { + return (nav__1428_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_ConnectionTimedOut}; + } + default: { + t6 = posix_unexpectedErrno__1700(t6); + t7.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t7.error = t6; + return t7; + } + } + + zig_block_1:; + goto zig_loop_16; +} + +static nav__6107_38 math_mul__anon_3404__6107(uintptr_t const a0, uintptr_t const a1) { + nav__6107_42 t0; + uintptr_t t3; + nav__6107_38 t4; + uint8_t t1; + bool t2; + t0.f1 = zig_mulo_u64(&t0.f0, a0, 
a1, UINT8_C(64)); + t1 = t0.f1; + t2 = t1 != UINT8_C(0); + if (t2) { + return (nav__6107_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_Overflow}; + } + goto zig_block_0; + + zig_block_0:; + t3 = t0.f0; + t4.payload = t3; + t4.error = UINT16_C(0); + return t4; +} + +static nav__6108_39 mem_Allocator_allocBytesWithAlignment__anon_3406__6108(struct mem_Allocator__565 const a0, uintptr_t const a1, uintptr_t const a2) { + struct mem_Allocator__565 const *t1; + uint64_t t2; + uint8_t *t4; + uint8_t *t13; + uint8_t *t14; + uint8_t *t15; + struct mem_Allocator__565 t5; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t7; + struct mem_Allocator_VTable__568 const *const *t8; + struct mem_Allocator_VTable__568 const *t9; + uint8_t *(*const *t10)(void *, uintptr_t, uint8_t, uintptr_t); + uint8_t *(*t11)(void *, uintptr_t, uint8_t, uintptr_t); + void *t12; + uint8_t *const *t16; + nav__6108_52 t17; + nav__6108_39 t18; + bool t3; + uint8_t t6; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = a1; + t3 = t2 == UINT64_C(0); + if (t3) { + return (nav__6108_39){(uint8_t *)UINTPTR_MAX,0}; + } + goto zig_block_0; + + zig_block_0:; + t5 = (*t1); + t6 = mem_Alignment_fromByteUnits__1032((uintptr_t)1ul); + t7 = t5; + t1 = (struct mem_Allocator__565 const *)&t7; + t8 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t9 = (*t8); + t10 = (uint8_t *(*const *)(void *, uintptr_t, uint8_t, uintptr_t))&t9->alloc; + t11 = (*t10); + t12 = t5.ptr; + t13 = t11(t12, a1, t6, a2); + t3 = t13 != NULL; + if (t3) { + t14 = t13; + t4 = t14; + goto zig_block_1; + } + return (nav__6108_39){((uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),zig_error_OutOfMemory}; + + zig_block_1:; + t15 = t4; + t16 = (uint8_t *const *)&t15; + t14 = (*t16); + t14 = (uint8_t *)(((uintptr_t)t14) + ((uintptr_t)0ul*sizeof(uint8_t))); + t17.ptr = t14; + t17.len = a1; + t18.payload = t4; + t18.error = UINT16_C(0); + return t18; +} + +static nav__4244_38 io_Reader_read__4244(struct io_Reader__2372 
const a0, nav__4244_41 const a1) { + struct io_Reader__2372 const *t1; + nav__4244_38 (*const *t2)(void const *, nav__4244_41); + nav__4244_38 (*t3)(void const *, nav__4244_41); + void const *t4; + nav__4244_38 t5; + struct io_Reader__2372 t0; + t0 = a0; + t1 = (struct io_Reader__2372 const *)&t0; + t2 = (nav__4244_38 (*const *)(void const *, nav__4244_41))&t1->readFn; + t3 = (*t2); + t4 = a0.context; + t5 = t3(t4, a1); + return t5; +} + +static nav__6109_39 mem_sliceAsBytes__anon_3437__6109(nav__6109_39 const a0) { + uintptr_t t0; + uint64_t t1; + uint8_t const *t4; + uint8_t const *t5; + uint8_t const *const *t6; + nav__6109_39 t7; + bool t2; + bool t3; + t0 = a0.len; + t1 = t0; + t2 = t1 == UINT64_C(0); + if (t2) { + t3 = true; + goto zig_block_1; + } + t3 = false; + goto zig_block_1; + + zig_block_1:; + if (t3) { + return (nav__6109_39){(uint8_t const *)((void const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),(uintptr_t)0ul}; + } + goto zig_block_0; + + zig_block_0:; + t4 = a0.ptr; + t5 = t4; + t6 = (uint8_t const *const *)&t5; + t0 = a0.len; + t4 = (*t6); + t4 = (uint8_t const *)(((uintptr_t)t4) + ((uintptr_t)0ul*sizeof(uint8_t))); + t7.ptr = t4; + t7.len = t0; + return t7; +} + +static uint16_t bincode_serializeInt__anon_3599__6113(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, uint32_t const a1) { + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *t1; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *t4; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t2; + struct 
io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t0; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t3; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *const *t5; + void const **t7; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t8; + void const *t9; + nav__6113_48 (**t10)(void const *, nav__6113_50); + struct io_Writer__3718 t11; + struct io_Writer__3718 t6; + struct io_Writer__3718 t12; + struct io_Writer__3718 t14; + struct io_Writer__3718 const *t13; + nav__6113_50 t17; + uint16_t t18; + uint16_t t19; + uint8_t t16[4]; + uint8_t t15[4]; + t0 = a0; + t1 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *)&t0; + t2 = (*t1); + t3 = t2; + t1 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *)&t3; + t4 = t1; + t5 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *const *)&t4; + t7 = (void const **)&t6.context; + t1 = (*t5); + t8 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t1->context; + t9 = (void const *)t8; + (*t7) = t9; + t10 = (nav__6113_48 (**)(void const *, nav__6113_50))&t6.writeFn; + (*t10) = &io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29___4130; + t11 = t6; + t12 = t11; + t13 = (struct io_Writer__3718 const *)&t12; + t11 = (*t13); + t14 = t11; + t13 = (struct io_Writer__3718 const *)&t14; + memcpy(&t16, &a1, sizeof(uint8_t[4])); + memcpy((char 
*)&t15, t16, sizeof(uint8_t[4])); + t11 = (*t13); + t17.ptr = &t15[(uintptr_t)0ul]; + t17.len = (uintptr_t)4ul; + t18 = io_Writer_writeAll__6127(t11, t17); + memcpy(&t19, &t18, sizeof(uint16_t)); + if (t19) { + return t19; + } + return 0; +} + +static uint16_t bincode_serializeStruct__anon_3604__6114(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, struct shared_Command__struct_2012__2012 const a1) { + nav__6114_45 t2; + uint32_t t0; + uint16_t t1; + t0 = a1.pid; + t1 = bincode_serialize__anon_3213__6100(a0, t0); + if (t1) { + return t1; + } + t2 = a1.uri; + t1 = bincode_serialize__anon_3754__6136(a0, t2); + if (t1) { + return t1; + } + return 0; +} + +static uint16_t bincode_serializeStruct__anon_3605__6115(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, struct shared_Command__struct_2013__2013 const a1) { + nav__6115_45 t0; + uint16_t t1; + t0 = a1.name; + t1 = bincode_serialize__anon_3754__6136(a0, t0); + if (t1) { + return t1; + } + t0 = a1.version; + t1 = bincode_serialize__anon_3754__6136(a0, t0); + if (t1) { + return t1; + } + return 0; +} + +static uint16_t posix_errno__anon_3616__6117(intptr_t const a0) { + int64_t t1; + int *t3; + int t4; + uint16_t t0; + uint16_t t5; + bool t2; + t1 = a0; + t2 = t1 == -INT64_C(1); + if (t2) { + t3 = zig_e___errno_location(); + t4 = (*t3); + t5 = (uint16_t)t4; + t0 = t5; + goto zig_block_0; + } + t0 = UINT16_C(0); + goto zig_block_0; + + zig_block_0:; + return t0; +} + +static nav__4130_38 io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29___4130(void const *const a0, nav__4130_41 const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t0; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t1; + nav__4130_38 
t2; + nav__4130_38 t3; + t0 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)a0; + t1 = (*t0); + t2 = array_list_ArrayListAligned_28u8_2cnull_29_appendWrite__3984(t1, a1); + memcpy(&t3, &t2, sizeof(nav__4130_38)); + return t3; +} + +static uint16_t io_Writer_writeAll__6127(struct io_Writer__3718 const a0, nav__6127_40 const a1) { + struct io_Writer__3718 const *t1; + nav__6127_40 const *t3; + uintptr_t t5; + uintptr_t t6; + uintptr_t t13; + uintptr_t t4; + uint64_t t7; + uint64_t t8; + struct io_Writer__3718 t10; + struct io_Writer__3718 t0; + nav__6127_40 t11; + nav__6127_40 t2; + uint8_t const *t12; + nav__6127_43 t14; + uint16_t t15; + bool t9; + t0 = a0; + t1 = (struct io_Writer__3718 const *)&t0; + t2 = a1; + t3 = (nav__6127_40 const *)&t2; + t4 = (uintptr_t)0ul; + zig_loop_11: + t5 = t4; + t6 = a1.len; + t7 = t5; + t8 = t6; + t9 = t7 != t8; + if (t9) { + t6 = t4; + t10 = (*t1); + t5 = t4; + t11 = (*t3); + t12 = t11.ptr; + t12 = (uint8_t const *)(((uintptr_t)t12) + (t5*sizeof(uint8_t))); + t13 = t11.len; + t5 = t13 - t5; + t11.ptr = t12; + t11.len = t5; + t14 = io_Writer_write__6126(t10, t11); + if (t14.error) { + t15 = t14.error; + return t15; + } + t5 = t14.payload; + t5 = t6 + t5; + t4 = t5; + goto zig_block_1; + } + goto zig_block_0; + + zig_block_1:; + goto zig_loop_11; + + zig_block_0:; + return 0; +} + +static uint16_t bincode_serialize__anon_3754__6136(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, nav__6136_40 const a1) { + uint16_t t0; + t0 = bincode_serializePointer__anon_3766__6137(a0, a1); + if (t0) { + return t0; + } + return 0; +} + +static nav__3984_38 array_list_ArrayListAligned_28u8_2cnull_29_appendWrite__3984(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0, nav__3984_42 const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t1; + struct 
array_list_ArrayListAligned_28u8_2cnull_29__2040 *t2; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t0; + nav__3984_38 t4; + uintptr_t t5; + uint16_t t3; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t0; + t2 = (*t1); + t3 = array_list_ArrayListAligned_28u8_2cnull_29_appendSlice__3978(t2, a1); + if (t3) { + t4.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t4.error = t3; + return t4; + } + t5 = a1.len; + t4.payload = t5; + t4.error = UINT16_C(0); + return t4; +} + +static nav__6126_38 io_Writer_write__6126(struct io_Writer__3718 const a0, nav__6126_41 const a1) { + struct io_Writer__3718 const *t1; + nav__6126_38 (*const *t2)(void const *, nav__6126_41); + nav__6126_38 (*t3)(void const *, nav__6126_41); + void const *t4; + nav__6126_38 t5; + struct io_Writer__3718 t0; + t0 = a0; + t1 = (struct io_Writer__3718 const *)&t0; + t2 = (nav__6126_38 (*const *)(void const *, nav__6126_41))&t1->writeFn; + t3 = (*t2); + t4 = a0.context; + t5 = t3(t4, a1); + return t5; +} + +static uint16_t bincode_serializePointer__anon_3766__6137(struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const a0, nav__6137_40 const a1) { + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *t1; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *t6; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *t7; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *t24; + struct 
io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t2; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t0; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t5; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 t23; + uintptr_t t3; + uint64_t t4; + struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *const *t8; + void const **t10; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t11; + void const *t12; + nav__6137_51 (**t13)(void const *, nav__6137_40); + struct io_Writer__3718 t14; + struct io_Writer__3718 t9; + struct io_Writer__3718 t15; + struct io_Writer__3718 t17; + struct io_Writer__3718 t25; + struct io_Writer__3718 t26; + struct io_Writer__3718 const *t16; + nav__6137_40 t20; + uint16_t t21; + uint16_t t22; + uint8_t t19[8]; + uint8_t t18[8]; + t0 = a0; + t1 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *)&t0; + t2 = (*t1); + t3 = a1.len; + t4 = t3; + t5 = t2; + t6 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *)&t5; + t7 = t6; + t8 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *const *)&t7; + t10 = (void const **)&t9.context; + t6 = (*t8); + t11 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t6->context; + t12 = (void const *)t11; + (*t10) = 
t12; + t13 = (nav__6137_51 (**)(void const *, nav__6137_40))&t9.writeFn; + (*t13) = &io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29___4130; + t14 = t9; + t15 = t14; + t16 = (struct io_Writer__3718 const *)&t15; + t14 = (*t16); + t17 = t14; + t16 = (struct io_Writer__3718 const *)&t17; + memcpy(&t19, &t4, sizeof(uint8_t[8])); + memcpy((char *)&t18, t19, sizeof(uint8_t[8])); + t14 = (*t16); + t20.ptr = &t18[(uintptr_t)0ul]; + t20.len = (uintptr_t)8ul; + t21 = io_Writer_writeAll__6127(t14, t20); + memcpy(&t22, &t21, sizeof(uint16_t)); + if (t22) { + return t22; + } + t2 = (*t1); + t23 = t2; + t1 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *)&t23; + t24 = t1; + t8 = (struct io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29__2071 const *const *)&t24; + t10 = (void const **)&t25.context; + t1 = (*t8); + t11 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t1->context; + t12 = (void const *)t11; + (*t10) = t12; + t13 = (nav__6137_51 (**)(void const *, nav__6137_40))&t25.writeFn; + (*t13) = &io_GenericWriter_28_2aarray_list_ArrayListAligned_28u8_2cnull_29_2cerror_7bOutOfMemory_7d_2c_28function_20_27appendWrite_27_29_29___4130; + t14 = t25; + t26 = t14; + t16 = (struct io_Writer__3718 const *)&t26; + t14 = (*t16); + t22 = io_Writer_writeAll__6127(t14, a1); + memcpy(&t21, &t22, sizeof(uint16_t)); + if (t21) { + return t21; + } + return 0; +} + +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_appendSlice__3978(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0, nav__3978_41 const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t1; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t2; + struct 
array_list_ArrayListAligned_28u8_2cnull_29__2040 *t0; + uintptr_t t3; + uint16_t t4; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t0; + t2 = (*t1); + t3 = a1.len; + t4 = array_list_ArrayListAligned_28u8_2cnull_29_ensureUnusedCapacity__3997(t2, t3); + if (t4) { + return t4; + } + t2 = (*t1); + array_list_ArrayListAligned_28u8_2cnull_29_appendSliceAssumeCapacity__3979(t2, a1); + return 0; +} + +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_ensureUnusedCapacity__3997(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0, uintptr_t const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t1; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t2; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t0; + nav__3997_43 *t3; + nav__3997_43 t4; + uintptr_t t5; + nav__3997_52 t6; + uint16_t t7; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t0; + t2 = (*t1); + t3 = (nav__3997_43 *)&a0->items; + t4 = (*t3); + t5 = t4.len; + t6 = array_list_addOrOom__3922(t5, a1); + if (t6.error) { + t7 = t6.error; + return t7; + } + t5 = t6.payload; + t7 = array_list_ArrayListAligned_28u8_2cnull_29_ensureTotalCapacity__3995(t2, t5); + return t7; +} + +static void array_list_ArrayListAligned_28u8_2cnull_29_appendSliceAssumeCapacity__3979(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0, nav__3979_41 const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t1; + nav__3979_47 *t2; + nav__3979_47 t3; + uintptr_t t4; + uintptr_t t5; + uintptr_t t7; + uintptr_t *t6; + uint64_t t8; + uint64_t t9; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t11; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t0; + uint8_t *t12; + uint8_t const *t13; + bool t10; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t0; + t2 = (nav__3979_47 *)&a0->items; + t3 = (*t2); + t4 = t3.len; + t5 = 
a1.len; + t5 = t4 + t5; + t6 = (uintptr_t *)&a0->capacity; + t7 = (*t6); + t8 = t5; + t9 = t7; + t10 = t8 <= t9; + debug_assert__180(t10); + t11 = (*t1); + t2 = (nav__3979_47 *)&t11->items; + t6 = &t2->len; + (*t6) = t5; + t11 = (*t1); + t2 = (nav__3979_47 *)&t11->items; + t5 = a1.len; + t3 = (*t2); + t12 = t3.ptr; + t12 = (uint8_t *)(((uintptr_t)t12) + (t4*sizeof(uint8_t))); + t3.ptr = t12; + t3.len = t5; + t13 = a1.ptr; + if (t3.len != 0) memcpy(t3.ptr, t13, t3.len * sizeof(uint8_t)); + return; +} + +static nav__3922_38 array_list_addOrOom__3922(uintptr_t const a0, uintptr_t const a1) { + nav__3922_42 t2; + uintptr_t t3; + uintptr_t t0; + uintptr_t const *t5; + uint8_t const *t6; + nav__3922_38 t8; + uint8_t t4; + uint8_t t1; + bool t7; + t2.f1 = zig_addo_u64(&t2.f0, a0, a1, UINT8_C(64)); + t3 = t2.f0; + t0 = t3; + t4 = t2.f1; + t1 = t4; + t5 = (uintptr_t const *)&t0; + t6 = (uint8_t const *)&t1; + t4 = (*t6); + t7 = t4 != UINT8_C(0); + if (t7) { + return (nav__3922_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_OutOfMemory}; + } + goto zig_block_0; + + zig_block_0:; + t3 = (*t5); + t8.payload = t3; + t8.error = UINT16_C(0); + return t8; +} + +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_ensureTotalCapacity__3995(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0, uintptr_t const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t1; + uintptr_t *t2; + uintptr_t t3; + uint64_t t4; + uint64_t t5; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t7; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t0; + uint16_t t8; + bool t6; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t0; + t2 = (uintptr_t *)&a0->capacity; + t3 = (*t2); + t4 = t3; + t5 = a1; + t6 = t4 >= t5; + if (t6) { + return 0; + } + goto zig_block_0; + + zig_block_0:; + t2 = (uintptr_t *)&a0->capacity; + t3 = (*t2); + t3 = array_list_ArrayListAlignedUnmanaged_28u8_2cnull_29_growCapacity__6196(t3, 
a1); + t7 = (*t1); + t8 = array_list_ArrayListAligned_28u8_2cnull_29_ensureTotalCapacityPrecise__3996(t7, t3); + return t8; +} + +static uintptr_t array_list_ArrayListAlignedUnmanaged_28u8_2cnull_29_growCapacity__6196(uintptr_t const a0, uintptr_t const a1) { + uintptr_t t1; + uintptr_t t2; + uintptr_t t0; + uint64_t t3; + uint64_t t4; + bool t5; + t0 = a0; + zig_loop_6: + t1 = t0; + t2 = t0; + t2 = t2 / (uintptr_t)2ul; + t2 = t2 + (uintptr_t)128ul; + t2 = zig_adds_u64(t1, t2, UINT8_C(64)); + t0 = t2; + t2 = t0; + t3 = t2; + t4 = a1; + t5 = t3 >= t4; + if (t5) { + t2 = t0; + return t2; + } + goto zig_block_0; + + zig_block_0:; + goto zig_loop_6; +} + +static uint16_t array_list_ArrayListAligned_28u8_2cnull_29_ensureTotalCapacityPrecise__3996(struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const a0, uintptr_t const a1) { + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *t1; + uintptr_t *t2; + uintptr_t t3; + uint64_t t4; + uint64_t t5; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t7; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *t0; + struct array_list_ArrayListAligned_28u8_2cnull_29__2040 t8; + nav__3996_43 t9; + nav__3996_43 t12; + nav__3996_43 t20; + nav__3996_43 t21; + nav__3996_43 t18; + struct mem_Allocator__565 *t10; + struct mem_Allocator__565 t11; + nav__3996_43 *t13; + uint8_t **t14; + uint8_t *t15; + nav__3996_55 t16; + nav__3996_43 const *t19; + uint16_t t17; + bool t6; + t0 = a0; + t1 = (struct array_list_ArrayListAligned_28u8_2cnull_29__2040 *const *)&t0; + t2 = (uintptr_t *)&a0->capacity; + t3 = (*t2); + t4 = t3; + t5 = a1; + t6 = t4 >= t5; + if (t6) { + return 0; + } + goto zig_block_0; + + zig_block_0:; + t7 = (*t1); + t8 = (*t7); + t9 = array_list_ArrayListAligned_28u8_2cnull_29_allocatedSlice__4006(t8); + t7 = (*t1); + t10 = (struct mem_Allocator__565 *)&t7->allocator; + t11 = (*t10); + t12 = mem_Allocator_remap__anon_3824__6223(t11, t9, a1); + t6 = t12.ptr != NULL; + if (t6) { + t9 = t12; + t7 = 
(*t1); + t13 = (nav__3996_43 *)&t7->items; + t14 = &t13->ptr; + t15 = t9.ptr; + (*t14) = t15; + t7 = (*t1); + t2 = (uintptr_t *)&t7->capacity; + t3 = t9.len; + (*t2) = t3; + goto zig_block_1; + } + t7 = (*t1); + t10 = (struct mem_Allocator__565 *)&t7->allocator; + t11 = (*t10); + t16 = mem_Allocator_alignedAlloc__anon_3829__6224(t11, a1); + if (t16.error) { + t17 = t16.error; + return t17; + } + t12 = t16.payload; + t18 = t12; + t19 = (nav__3996_43 const *)&t18; + t13 = (nav__3996_43 *)&a0->items; + t20 = (*t13); + t3 = t20.len; + t20 = (*t19); + t15 = t20.ptr; + t15 = (uint8_t *)(((uintptr_t)t15) + ((uintptr_t)0ul*sizeof(uint8_t))); + t20.ptr = t15; + t20.len = t3; + t13 = (nav__3996_43 *)&a0->items; + t21 = (*t13); + t15 = t21.ptr; + if (t20.len != 0) memcpy(t20.ptr, t15, t20.len * sizeof(uint8_t)); + t7 = (*t1); + t10 = (struct mem_Allocator__565 *)&t7->allocator; + t11 = (*t10); + mem_Allocator_free__anon_2631__4290(t11, t9); + t7 = (*t1); + t13 = (nav__3996_43 *)&t7->items; + t14 = &t13->ptr; + t15 = t12.ptr; + (*t14) = t15; + t7 = (*t1); + t2 = (uintptr_t *)&t7->capacity; + t3 = t12.len; + (*t2) = t3; + goto zig_block_1; + + zig_block_1:; + return 0; +} + +static nav__6223_39 mem_Allocator_remap__anon_3824__6223(struct mem_Allocator__565 const a0, nav__6223_39 const a1, uintptr_t const a2) { + struct mem_Allocator__565 const *t1; + nav__6223_39 const *t3; + uint64_t t4; + struct mem_Allocator__565 t6; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t15; + nav__6223_39 t7; + nav__6223_39 t10; + nav__6223_39 t2; + uint8_t *t8; + uint8_t *t20; + uint8_t *t21; + uint8_t *t22; + void *t9; + uintptr_t t11; + uintptr_t t13; + nav__6223_50 t12; + struct mem_Allocator_VTable__568 const *const *t16; + struct mem_Allocator_VTable__568 const *t17; + uint8_t *(*const *t18)(void *, nav__6223_39, uint8_t, uintptr_t, uintptr_t); + uint8_t *(*t19)(void *, nav__6223_39, uint8_t, uintptr_t, uintptr_t); + uint8_t *const *t23; + bool t5; + uint8_t t14; + t0 = a0; + t1 
= (struct mem_Allocator__565 const *)&t0; + t2 = a1; + t3 = (nav__6223_39 const *)&t2; + t4 = a2; + t5 = t4 == UINT64_C(0); + if (t5) { + t6 = (*t1); + mem_Allocator_free__anon_2631__4290(t6, a1); + t7 = (*t3); + t8 = t7.ptr; + t8 = (uint8_t *)(((uintptr_t)t8) + ((uintptr_t)0ul*sizeof(uint8_t))); + t9 = (void *)t8; + t7.ptr = t9; + t7.len = (uintptr_t)0ul; + t10 = t7; + return t10; + } + goto zig_block_0; + + zig_block_0:; + t11 = a1.len; + t4 = t11; + t5 = t4 == UINT64_C(0); + if (t5) { + return (nav__6223_39){NULL,(uintptr_t)0xaaaaaaaaaaaaaaaaul}; + } + goto zig_block_1; + + zig_block_1:; + t10 = mem_sliceAsBytes__anon_2966__4339(a1); + t12 = math_mul__anon_3404__6107((uintptr_t)1ul, a2); + t5 = t12.error == UINT16_C(0); + if (t5) { + t13 = t12.payload; + t11 = t13; + goto zig_block_2; + } + return (nav__6223_39){NULL,(uintptr_t)0xaaaaaaaaaaaaaaaaul}; + + zig_block_2:; + t6 = (*t1); + t14 = mem_Alignment_fromByteUnits__1032((uintptr_t)1ul); + t13 = (uintptr_t)zig_return_address(); + t15 = t6; + t1 = (struct mem_Allocator__565 const *)&t15; + t16 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t17 = (*t16); + t18 = (uint8_t *(*const *)(void *, nav__6223_39, uint8_t, uintptr_t, uintptr_t))&t17->remap; + t19 = (*t18); + t9 = t6.ptr; + t20 = t19(t9, t10, t14, t11, t13); + t5 = t20 != NULL; + if (t5) { + t21 = t20; + t8 = t21; + goto zig_block_3; + } + return (nav__6223_39){NULL,(uintptr_t)0xaaaaaaaaaaaaaaaaul}; + + zig_block_3:; + t22 = t8; + t23 = (uint8_t *const *)&t22; + t8 = (*t23); + t8 = (uint8_t *)(((uintptr_t)t8) + ((uintptr_t)0ul*sizeof(uint8_t))); + t10.ptr = t8; + t10.len = t11; + memcpy(&t7, &t10, sizeof(nav__6223_39)); + t7 = mem_bytesAsSlice__anon_3844__6225(t7); + t10 = t7; + return t10; +} + +static nav__6224_40 mem_Allocator_alignedAlloc__anon_3829__6224(struct mem_Allocator__565 const a0, uintptr_t const a1) { + struct mem_Allocator__565 const *t1; + struct mem_Allocator__565 t2; + struct mem_Allocator__565 t0; + struct 
mem_Allocator__565 t5; + uintptr_t t3; + nav__6224_40 t4; + nav__6224_40 t8; + nav__6224_51 t6; + uint8_t *t9; + uint8_t *t10; + uint8_t *const *t11; + nav__6224_39 t12; + uint16_t t7; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = (*t1); + t3 = (uintptr_t)zig_return_address(); + t5 = t2; + t1 = (struct mem_Allocator__565 const *)&t5; + t2 = (*t1); + t6 = mem_Allocator_allocWithSizeAndAlignment__anon_2849__4310(t2, a1, t3); + if (t6.error) { + t7 = t6.error; + t8.payload = (nav__6224_39){(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t8.error = t7; + t4 = t8; + goto zig_block_0; + } + t9 = t6.payload; + t10 = t9; + t11 = (uint8_t *const *)&t10; + t9 = (*t11); + t9 = (uint8_t *)(((uintptr_t)t9) + ((uintptr_t)0ul*sizeof(uint8_t))); + t12.ptr = t9; + t12.len = a1; + t8.payload = t12; + t8.error = UINT16_C(0); + t4 = t8; + goto zig_block_0; + + zig_block_0:; + return t4; +} + +static nav__6225_39 mem_bytesAsSlice__anon_3844__6225(nav__6225_39 const a0) { + uintptr_t t0; + uint64_t t1; + uint8_t *t4; + uint8_t *t5; + uint8_t *const *t6; + nav__6225_39 t7; + bool t2; + bool t3; + t0 = a0.len; + t1 = t0; + t2 = t1 == UINT64_C(0); + if (t2) { + t3 = true; + goto zig_block_1; + } + t3 = false; + goto zig_block_1; + + zig_block_1:; + if (t3) { + return (nav__6225_39){(uint8_t *)((void const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),(uintptr_t)0ul}; + } + goto zig_block_0; + + zig_block_0:; + t4 = a0.ptr; + t5 = t4; + t6 = (uint8_t *const *)&t5; + t0 = a0.len; + t0 = t0 / (uintptr_t)1ul; + t4 = (*t6); + t4 = (uint8_t *)(((uintptr_t)t4) + ((uintptr_t)0ul*sizeof(uint8_t))); + t7.ptr = t4; + t7.len = t0; + return t7; +} + +void c_instrument_hooks_deinit__236(struct instruments_root_InstrumentHooks__547 *const a0) { + struct instruments_root_InstrumentHooks__547 *t1; + struct instruments_root_InstrumentHooks__547 *t4; + struct instruments_root_InstrumentHooks__547 *t2; + struct instruments_root_InstrumentHooks__547 *t5; + struct 
instruments_root_InstrumentHooks__547 *const *t3; + struct instruments_root_InstrumentHooks__547 t6; + struct instruments_perf_PerfInstrument__559 *t8; + bool t0; + uint8_t t7; + t0 = a0 != NULL; + if (t0) { + t1 = a0; + t2 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t2; + t4 = (*t3); + t5 = t4; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t5; + t6 = (*t4); + t7 = t6.tag; + switch (t7) { + case UINT8_C(0): { + goto zig_block_1; + } + case UINT8_C(1): { + t4 = (*t3); + t8 = (struct instruments_perf_PerfInstrument__559 *)&t4->payload.perf; + instruments_perf_PerfInstrument_deinit__740(t8); + goto zig_block_1; + } + case UINT8_C(2): { + goto zig_block_1; + } + default: zig_unreachable(); + } + + zig_block_1:; + mem_Allocator_destroy__anon_3862__6226((struct mem_Allocator__565){((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)&heap_CAllocator_vtable__3549)}, t1); + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + return; +} + +static void instruments_perf_PerfInstrument_deinit__740(struct instruments_perf_PerfInstrument__559 *const a0) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + struct fifo_UnixPipe_Writer__600 *t3; + struct fifo_UnixPipe_Reader__602 *t4; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = (struct fifo_UnixPipe_Writer__600 *)&t2->writer; + fifo_UnixPipe_Writer_deinit__1065(t3); + t2 = (*t1); + t4 = (struct fifo_UnixPipe_Reader__602 *)&t2->reader; + fifo_UnixPipe_Reader_deinit__1072(t4); + return; +} + +static void mem_Allocator_destroy__anon_3862__6226(struct mem_Allocator__565 const a0, struct instruments_root_InstrumentHooks__547 *const a1) { + struct mem_Allocator__565 const *t1; + uint8_t *t2; + uint8_t *t3; + uint8_t *const *t4; + struct mem_Allocator__565 t5; + struct mem_Allocator__565 t0; + 
struct mem_Allocator__565 t10; + uint8_t (*t6)[72]; + nav__6226_52 t7; + uintptr_t t9; + struct mem_Allocator_VTable__568 const *const *t11; + struct mem_Allocator_VTable__568 const *t12; + void (*const *t13)(void *, nav__6226_52, uint8_t, uintptr_t); + void (*t14)(void *, nav__6226_52, uint8_t, uintptr_t); + void *t15; + uint8_t t8; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = (uint8_t *)a1; + t3 = t2; + t4 = (uint8_t *const *)&t3; + t5 = (*t1); + t2 = (*t4); + t2 = (uint8_t *)(((uintptr_t)t2) + ((uintptr_t)0ul*sizeof(uint8_t))); + t6 = (uint8_t (*)[72])t2; + t7.ptr = &(*t6)[(uintptr_t)0ul]; + t7.len = (uintptr_t)72ul; + t8 = mem_Alignment_fromByteUnits__1032((uintptr_t)8ul); + t9 = (uintptr_t)zig_return_address(); + t10 = t5; + t1 = (struct mem_Allocator__565 const *)&t10; + t11 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t12 = (*t11); + t13 = (void (*const *)(void *, nav__6226_52, uint8_t, uintptr_t))&t12->free; + t14 = (*t13); + t15 = t5.ptr; + t14(t15, t7, t8, t9); + return; +} + +static void fifo_UnixPipe_Writer_deinit__1065(struct fifo_UnixPipe_Writer__600 *const a0) { + struct fifo_UnixPipe_Writer__600 *const *t1; + struct fifo_UnixPipe_Writer__600 *t2; + struct fifo_UnixPipe_Writer__600 *t0; + struct fs_File__608 *t3; + struct fs_File__608 t4; + t0 = a0; + t1 = (struct fifo_UnixPipe_Writer__600 *const *)&t0; + t2 = (*t1); + t3 = (struct fs_File__608 *)&t2->file; + t4 = (*t3); + fs_File_close__1163(t4); + return; +} + +static void fifo_UnixPipe_Reader_deinit__1072(struct fifo_UnixPipe_Reader__602 *const a0) { + struct fifo_UnixPipe_Reader__602 *const *t1; + struct fifo_UnixPipe_Reader__602 *t2; + struct fifo_UnixPipe_Reader__602 *t0; + struct fs_File__608 *t3; + struct fs_File__608 t4; + t0 = a0; + t1 = (struct fifo_UnixPipe_Reader__602 *const *)&t0; + t2 = (*t1); + t3 = (struct fs_File__608 *)&t2->file; + t4 = (*t3); + fs_File_close__1163(t4); + return; +} + +static void fs_File_close__1163(struct fs_File__608 
const a0) { + int32_t t0; + t0 = a0.handle; + posix_close__1406(t0); + return; +} + +bool c_instrument_hooks_is_instrumented__237(struct instruments_root_InstrumentHooks__547 *const a0) { + struct instruments_root_InstrumentHooks__547 *t1; + struct instruments_root_InstrumentHooks__547 *t2; + struct instruments_root_InstrumentHooks__547 *const *t3; + struct instruments_root_InstrumentHooks__547 t4; + struct instruments_perf_PerfInstrument__559 t8; + struct instruments_perf_PerfInstrument__559 t9; + bool t0; + bool t6; + bool t7; + uint8_t t5; + t0 = a0 != NULL; + if (t0) { + t1 = a0; + t2 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t2; + t1 = (*t3); + t4 = (*t1); + t5 = t4.tag; + switch (t5) { + case UINT8_C(0): { + t5 = running_on_valgrind(); + t7 = t5 > UINT8_C(0); + t6 = t7; + goto zig_block_2; + } + case UINT8_C(1): { + t8 = t4.payload.perf; + t9 = t8; + t7 = instruments_perf_PerfInstrument_is_instrumented__742(&t9); + t0 = t7; + goto zig_block_1; + } + case UINT8_C(2): { + t6 = false; + goto zig_block_2; + } + default: zig_unreachable(); + } + + zig_block_2:; + t0 = t6; + goto zig_block_1; + + zig_block_1:; + return t0; + } + goto zig_block_0; + + zig_block_0:; + return false; +} + +static zig_cold uint16_t instruments_perf_PerfInstrument_start_benchmark__743(struct instruments_perf_PerfInstrument__559 *const a0) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + struct fifo_UnixPipe_Writer__600 *t3; + struct fifo_UnixPipe_Reader__602 *t5; + uint16_t t4; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = (struct fifo_UnixPipe_Writer__600 *)&t2->writer; + t4 = fifo_UnixPipe_Writer_sendCmd__1064(t3, (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(1)}); + if (t4) { + return t4; + } + 
t2 = (*t1); + t5 = (struct fifo_UnixPipe_Reader__602 *)&t2->reader; + t4 = fifo_UnixPipe_Reader_waitForAck__1071(t5, (nav__743_66){UINT64_C(0xaaaaaaaaaaaaaaaa),true}); + if (t4) { + return t4; + } + return 0; +} + +uint8_t c_instrument_hooks_start_benchmark__238(struct instruments_root_InstrumentHooks__547 *const a0) { + struct instruments_root_InstrumentHooks__547 *t1; + struct instruments_root_InstrumentHooks__547 *t2; + struct instruments_root_InstrumentHooks__547 *t5; + struct instruments_root_InstrumentHooks__547 *const *t3; + struct instruments_root_InstrumentHooks__547 t6; + struct instruments_perf_PerfInstrument__559 *t8; + uint16_t t4; + uint16_t t9; + uint16_t t10; + bool t0; + uint8_t t7; + t0 = a0 != NULL; + if (t0) { + t1 = a0; + t2 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t2; + t1 = (*t3); + t5 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t5; + t6 = (*t1); + t7 = t6.tag; + t0 = t7 == UINT8_C(1); + if (t0) { + t1 = (*t3); + t8 = (struct instruments_perf_PerfInstrument__559 *)&t1->payload.perf; + t9 = instruments_perf_PerfInstrument_start_benchmark__743(t8); + memcpy(&t10, &t9, sizeof(uint16_t)); + t4 = t10; + goto zig_block_2; + } + t6 = (*t1); + t7 = t6.tag; + t0 = t7 == UINT8_C(0); + if (t0) { + t0 = features_is_feature_enabled__316(UINT64_C(0)); + t0 = !t0; + if (t0) { + callgrind_zero_stats(); + callgrind_start_instrumentation(); + goto zig_block_5; + } + goto zig_block_5; + + zig_block_5:; + t4 = 0; + goto zig_block_2; + } + goto zig_block_4; + + zig_block_4:; + goto zig_block_3; + + zig_block_3:; + t4 = 0; + goto zig_block_2; + + zig_block_2:; + memcpy(&t10, &t4, sizeof(uint16_t)); + t0 = t10 == UINT16_C(0); + if (t0) { + goto zig_block_1; + } + return UINT8_C(1); + + zig_block_1:; + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + return UINT8_C(0); +} + +static bool features_is_feature_enabled__316(uint64_t const a0) { + uint64_t t0; + uint64_t t1; + uintptr_t t2; + bool t3; + t0 
= (*&features_features__314); + t1 = a0; + t2 = t1; + t3 = bit_set_IntegerBitSet_2864_29_isSet__351(t0, t2); + return t3; +} + +static bool bit_set_IntegerBitSet_2864_29_isSet__351(uint64_t const a0, uintptr_t const a1) { + uint64_t t0; + uint64_t t2; + bool t1; + t0 = a1; + t1 = t0 < UINT64_C(64); + debug_assert__180(t1); + t0 = zig_wrap_u64((uint64_t)a0, UINT8_C(64)); + t2 = bit_set_IntegerBitSet_2864_29_maskBit__375(a1); + t2 = t0 & t2; + t1 = t2 != UINT64_C(0); + return t1; +} + +static zig_cold uint16_t instruments_perf_PerfInstrument_stop_benchmark__744(struct instruments_perf_PerfInstrument__559 *const a0) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + struct fifo_UnixPipe_Writer__600 *t3; + struct fifo_UnixPipe_Reader__602 *t5; + uint16_t t4; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = (struct fifo_UnixPipe_Writer__600 *)&t2->writer; + t4 = fifo_UnixPipe_Writer_sendCmd__1064(t3, (struct shared_Command__2009){{{{(uint8_t const *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},UINT32_C(0xaaaaaaaa)}},UINT8_C(2)}); + if (t4) { + return t4; + } + t2 = (*t1); + t5 = (struct fifo_UnixPipe_Reader__602 *)&t2->reader; + t4 = fifo_UnixPipe_Reader_waitForAck__1071(t5, (nav__744_66){UINT64_C(0xaaaaaaaaaaaaaaaa),true}); + if (t4) { + return t4; + } + return 0; +} + +uint8_t c_instrument_hooks_stop_benchmark__239(struct instruments_root_InstrumentHooks__547 *const a0) { + struct instruments_root_InstrumentHooks__547 *t1; + struct instruments_root_InstrumentHooks__547 *t2; + struct instruments_root_InstrumentHooks__547 *t5; + struct instruments_root_InstrumentHooks__547 *const *t3; + struct instruments_root_InstrumentHooks__547 t6; + struct instruments_perf_PerfInstrument__559 *t8; + uint16_t t4; + uint16_t t9; + uint16_t t10; + bool t0; + uint8_t t7; + t0 = a0 != NULL; + if (t0) { + t1 
= a0; + t2 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t2; + t1 = (*t3); + t5 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t5; + t6 = (*t1); + t7 = t6.tag; + t0 = t7 == UINT8_C(0); + if (t0) { + t0 = features_is_feature_enabled__316(UINT64_C(0)); + t0 = !t0; + if (t0) { + callgrind_stop_instrumentation(); + goto zig_block_4; + } + goto zig_block_4; + + zig_block_4:; + t4 = 0; + goto zig_block_2; + } + t6 = (*t1); + t7 = t6.tag; + t0 = t7 == UINT8_C(1); + if (t0) { + t1 = (*t3); + t8 = (struct instruments_perf_PerfInstrument__559 *)&t1->payload.perf; + t9 = instruments_perf_PerfInstrument_stop_benchmark__744(t8); + memcpy(&t10, &t9, sizeof(uint16_t)); + t4 = t10; + goto zig_block_2; + } + goto zig_block_5; + + zig_block_5:; + goto zig_block_3; + + zig_block_3:; + t4 = 0; + goto zig_block_2; + + zig_block_2:; + memcpy(&t10, &t4, sizeof(uint16_t)); + t0 = t10 == UINT16_C(0); + if (t0) { + goto zig_block_1; + } + return UINT8_C(1); + + zig_block_1:; + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + return UINT8_C(0); +} + +static uint16_t instruments_perf_PerfInstrument_set_executed_benchmark__745(struct instruments_perf_PerfInstrument__559 *const a0, uint32_t const a1, uint8_t const *const a2) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + struct fifo_UnixPipe_Writer__600 *t3; + nav__745_56 t4; + nav__745_56 t5; + struct shared_Command__struct_2012__2012 t6; + struct shared_Command__2009 t7; + struct fifo_UnixPipe_Reader__602 *t9; + uint16_t t8; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = (struct fifo_UnixPipe_Writer__600 *)&t2->writer; + t4 = mem_span__anon_3983__6231(a2); + memcpy(&t5, &t4, sizeof(nav__745_56)); + t6.uri = t5; + t6.pid = a1; + t7.tag = UINT8_C(0); + t7.payload.ExecutedBenchmark = t6; + t8 = 
fifo_UnixPipe_Writer_sendCmd__1064(t3, t7); + if (t8) { + return t8; + } + t2 = (*t1); + t9 = (struct fifo_UnixPipe_Reader__602 *)&t2->reader; + t8 = fifo_UnixPipe_Reader_waitForAck__1071(t9, (nav__745_66){UINT64_C(0xaaaaaaaaaaaaaaaa),true}); + if (t8) { + return t8; + } + return 0; +} + +uint8_t c_instrument_hooks_set_executed_benchmark__240(struct instruments_root_InstrumentHooks__547 *const a0, uint32_t const a1, uint8_t const *const a2) { + struct instruments_root_InstrumentHooks__547 *t1; + struct instruments_root_InstrumentHooks__547 *t2; + struct instruments_root_InstrumentHooks__547 *t5; + struct instruments_root_InstrumentHooks__547 *const *t3; + struct instruments_root_InstrumentHooks__547 t6; + uint8_t const *t8; + struct instruments_perf_PerfInstrument__559 *t9; + uint16_t t4; + uint16_t t10; + bool t0; + uint8_t t7; + t0 = a0 != NULL; + if (t0) { + t1 = a0; + t2 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t2; + t1 = (*t3); + t5 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t5; + t6 = (*t1); + t7 = t6.tag; + switch (t7) { + case UINT8_C(0): { + t8 = (uint8_t const *)a2; + callgrind_dump_stats_at(t8); + goto zig_block_3; + } + case UINT8_C(1): { + t1 = (*t3); + t9 = (struct instruments_perf_PerfInstrument__559 *)&t1->payload.perf; + t10 = instruments_perf_PerfInstrument_set_executed_benchmark__745(t9, a1, a2); + if (t10) { + t4 = t10; + goto zig_block_2; + } + goto zig_block_3; + } + case UINT8_C(2): { + goto zig_block_3; + } + default: zig_unreachable(); + } + + zig_block_3:; + t4 = 0; + goto zig_block_2; + + zig_block_2:; + memcpy(&t10, &t4, sizeof(uint16_t)); + t0 = t10 == UINT16_C(0); + if (t0) { + goto zig_block_1; + } + return UINT8_C(1); + + zig_block_1:; + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + return UINT8_C(0); +} + +static nav__6231_39 mem_span__anon_3983__6231(uint8_t const *const a0) { + uint8_t const *const *t1; + uintptr_t t2; + uint8_t const *t3; + uint8_t const *t0; + 
nav__6231_39 t4; + t0 = a0; + t1 = (uint8_t const *const *)&t0; + t2 = mem_len__anon_3992__6232(a0); + t3 = (*t1); + t3 = (uint8_t const *)(((uintptr_t)t3) + ((uintptr_t)0ul*sizeof(uint8_t))); + t4.ptr = t3; + t4.len = t2; + return t4; +} + +static uintptr_t mem_len__anon_3992__6232(uint8_t const *const a0) { + uint8_t const *t1; + uintptr_t t2; + bool t0; + t0 = a0 != NULL; + debug_assert__180(t0); + t1 = (uint8_t const *)a0; + t2 = mem_indexOfSentinel__anon_4000__6233(t1); + return t2; +} + +static uintptr_t mem_indexOfSentinel__anon_4000__6233(uint8_t const *const a0) { + static uint8_t const t11[32] = "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"; + static uint8_t const t18[32] = "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"; + uint8_t const *const *t1; + uintptr_t t4; + uintptr_t t6; + uintptr_t t16; + uintptr_t t2; + uint8_t const *t5; + uint8_t const *t0; + uint64_t t7; + uint8_t const (*t8)[32]; + uint8_t const (*t17)[32]; + bool t3; + uint8_t t9[32]; + uint8_t t10[32]; + bool t12[32]; + nav__6233_45 t13; + nav__6233_47 t14; + uint8_t t15; + t0 = a0; + t1 = (uint8_t const *const *)&t0; + t2 = (uintptr_t)0ul; + t3 = math_isPowerOfTwo__anon_4011__6234(); + if (t3) { + debug_assert__180(true); + t4 = t2; + t5 = (*t1); + t5 = (uint8_t const *)&t5[t4]; + t4 = (uintptr_t)t5; + t6 = t4 & (uintptr_t)4095ul; + t7 = t6; + t3 = t7 <= UINT64_C(4064); + if (t3) { + t6 = t2; + t5 = (*t1); + t5 = (uint8_t const *)(((uintptr_t)t5) + (t6*sizeof(uint8_t))); + t8 = (uint8_t const (*)[32])t5; + memcpy(t9, (const char *)t8, sizeof(uint8_t[32])); + memcpy(&t10, &t9, sizeof(uint8_t[32])); + for (t6 = (uintptr_t)0ul; t6 < (uintptr_t)32ul; t6 += (uintptr_t)1ul) { + t12[t6] = t10[t6] == t11[t6]; + } + t3 = false; + for (t6 = (uintptr_t)0ul; t6 < (uintptr_t)32ul; t6 += (uintptr_t)1ul) { + t3 |= t12[t6]; + } + if (t3) 
{ + t4 = t2; + memcpy(t13.array, t12, sizeof(nav__6233_45)); + t14 = simd_firstTrue__anon_4223__6599(t13); + t15 = t14.payload; + t6 = (uintptr_t)t15; + t6 = t4 + t6; + return t6; + } + goto zig_block_2; + + zig_block_2:; + t6 = t2; + t16 = mem_alignForward__anon_4226__6600(t4, (uintptr_t)32ul); + t4 = t16 - t4; + t4 = t4 / (uintptr_t)1ul; + t4 = t6 + t4; + t2 = t4; + goto zig_block_1; + } + zig_loop_79: + t4 = t2; + t5 = (*t1); + t5 = (uint8_t const *)&t5[t4]; + t4 = (uintptr_t)t5; + t4 = t4 & (uintptr_t)31ul; + t7 = t4; + t3 = t7 != UINT64_C(0); + if (t3) { + t4 = t2; + t15 = a0[t4]; + t3 = t15 == UINT8_C(0); + if (t3) { + t4 = t2; + return t4; + } + goto zig_block_5; + + zig_block_5:; + t4 = t2; + t4 = t4 + (uintptr_t)1ul; + t2 = t4; + goto zig_block_4; + } + goto zig_block_3; + + zig_block_4:; + goto zig_loop_79; + + zig_block_3:; + goto zig_block_1; + + zig_block_1:; + t6 = t2; + t5 = (*t1); + t5 = (uint8_t const *)&t5[t6]; + t6 = (uintptr_t)t5; + t3 = mem_isAligned__901(t6, (uintptr_t)32ul); + debug_assert__180(t3); + zig_loop_114: + t6 = t2; + t5 = (*t1); + t5 = (uint8_t const *)(((uintptr_t)t5) + (t6*sizeof(uint8_t))); + t8 = (uint8_t const (*)[32])t5; + t17 = (uint8_t const (*)[32])t8; + memcpy(t10, (const char *)t17, sizeof(uint8_t[32])); + for (t6 = (uintptr_t)0ul; t6 < (uintptr_t)32ul; t6 += (uintptr_t)1ul) { + t12[t6] = t10[t6] == t18[t6]; + } + t3 = false; + for (t6 = (uintptr_t)0ul; t6 < (uintptr_t)32ul; t6 += (uintptr_t)1ul) { + t3 |= t12[t6]; + } + if (t3) { + t6 = t2; + memcpy(t13.array, t12, sizeof(nav__6233_45)); + t14 = simd_firstTrue__anon_4223__6599(t13); + t15 = t14.payload; + t4 = (uintptr_t)t15; + t4 = t6 + t4; + return t4; + } + goto zig_block_6; + + zig_block_6:; + t6 = t2; + t6 = t6 + (uintptr_t)32ul; + t2 = t6; + goto zig_loop_114; + } + goto zig_block_0; + + zig_block_0:; + zig_loop_145: + t16 = t2; + t15 = a0[t16]; + t3 = t15 != UINT8_C(0); + if (t3) { + t16 = t2; + t16 = t16 + (uintptr_t)1ul; + t2 = t16; + goto zig_block_8; + } + 
goto zig_block_7; + + zig_block_8:; + goto zig_loop_145; + + zig_block_7:; + t16 = t2; + return t16; +} + +static bool math_isPowerOfTwo__anon_4011__6234(void) { + debug_assert__180(true); + return true; +} + +static nav__6599_38 simd_firstTrue__anon_4223__6599(nav__6599_40 const a0) { + static uint8_t const t2[32] = {UINT8_C(0),UINT8_C(1),UINT8_C(2),UINT8_C(3),UINT8_C(4),UINT8_C(5),UINT8_C(6),UINT8_C(7),UINT8_C(8),UINT8_C(9),UINT8_C(10),UINT8_C(11),UINT8_C(12),UINT8_C(13),UINT8_C(14),UINT8_C(15),UINT8_C(16),UINT8_C(17),UINT8_C(18),UINT8_C(19),UINT8_C(20),UINT8_C(21),UINT8_C(22),UINT8_C(23),UINT8_C(24),UINT8_C(25),UINT8_C(26),UINT8_C(27),UINT8_C(28),UINT8_C(29),UINT8_C(30),UINT8_C(31)}; + static uint8_t const t3[32] = {UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31),UINT8_C(31)}; + uintptr_t t1; + bool t0; + uint8_t t4[32]; + uint8_t t5; + nav__6599_38 t6; + t0 = false; + for (t1 = (uintptr_t)0ul; t1 < (uintptr_t)32ul; t1 += (uintptr_t)1ul) { + t0 |= a0.array[t1]; + } + t0 = !t0; + if (t0) { + return (nav__6599_38){true,UINT8_C(0xa)}; + } + goto zig_block_0; + + zig_block_0:; + for (t1 = (uintptr_t)0ul; t1 < (uintptr_t)32ul; t1 += (uintptr_t)1ul) { + t4[t1] = a0.array[t1] ? t2[t1] : t3[t1]; + } + t5 = UINT8_C(31); + for (t1 = (uintptr_t)0ul; t1 < (uintptr_t)32ul; t1 += (uintptr_t)1ul) { + t5 = t5 < t4[t1] ? 
t5 : t4[t1]; + } + t6.is_null = false; + t6.payload = t5; + return t6; +} + +static uintptr_t mem_alignForward__anon_4226__6600(uintptr_t const a0, uintptr_t const a1) { + uintptr_t t1; + bool t0; + t0 = mem_isValidAlignGeneric__anon_4284__6601(a1); + debug_assert__180(t0); + t1 = a1 - (uintptr_t)1ul; + t1 = a0 + t1; + t1 = mem_alignBackward__anon_4285__6602(t1, a1); + return t1; +} + +static bool mem_isAligned__901(uintptr_t const a0, uintptr_t const a1) { + uint64_t t0; + uint64_t t1; + bool t2; + t0 = a0; + t1 = a1; + t2 = mem_isAlignedGeneric__anon_4291__6603(t0, t1); + return t2; +} + +static bool mem_isValidAlignGeneric__anon_4284__6601(uintptr_t const a0) { + uint64_t t0; + bool t1; + bool t2; + t0 = a0; + t1 = t0 > UINT64_C(0); + if (t1) { + t1 = math_isPowerOfTwo__anon_3074__5975(a0); + t2 = t1; + goto zig_block_0; + } + t2 = false; + goto zig_block_0; + + zig_block_0:; + return t2; +} + +static uintptr_t mem_alignBackward__anon_4285__6602(uintptr_t const a0, uintptr_t const a1) { + uintptr_t t1; + bool t0; + t0 = mem_isValidAlignGeneric__anon_4284__6601(a1); + debug_assert__180(t0); + t1 = a1 - (uintptr_t)1ul; + t1 = zig_not_u64(t1, UINT8_C(64)); + t1 = a0 & t1; + return t1; +} + +static bool mem_isAlignedGeneric__anon_4291__6603(uint64_t const a0, uint64_t const a1) { + uint64_t t0; + bool t1; + t0 = mem_alignBackward__anon_4293__6604(a0, a1); + t1 = t0 == a0; + return t1; +} + +static uint64_t mem_alignBackward__anon_4293__6604(uint64_t const a0, uint64_t const a1) { + uint64_t t1; + bool t0; + t0 = mem_isValidAlignGeneric__anon_4295__6605(a1); + debug_assert__180(t0); + t1 = a1 - UINT64_C(1); + t1 = zig_not_u64(t1, UINT8_C(64)); + t1 = a0 & t1; + return t1; +} + +static bool mem_isValidAlignGeneric__anon_4295__6605(uint64_t const a0) { + bool t0; + bool t1; + t0 = a0 > UINT64_C(0); + if (t0) { + t0 = math_isPowerOfTwo__anon_4296__6606(a0); + t1 = t0; + goto zig_block_0; + } + t1 = false; + goto zig_block_0; + + zig_block_0:; + return t1; +} + +static 
bool math_isPowerOfTwo__anon_4296__6606(uint64_t const a0) { + uint64_t t1; + bool t0; + t0 = a0 > UINT64_C(0); + debug_assert__180(t0); + t1 = a0 - UINT64_C(1); + t1 = a0 & t1; + t0 = t1 == UINT64_C(0); + return t0; +} + +uint8_t c_instrument_hooks_executed_benchmark__241(struct instruments_root_InstrumentHooks__547 *const a0, uint32_t const a1, uint8_t const *const a2) { + uint8_t t0; + t0 = c_instrument_hooks_set_executed_benchmark__240(a0, a1, a2); + return t0; +} + +static uint16_t instruments_perf_PerfInstrument_set_integration__746(struct instruments_perf_PerfInstrument__559 *const a0, uint8_t const *const a1, uint8_t const *const a2) { + struct instruments_perf_PerfInstrument__559 *const *t1; + struct instruments_perf_PerfInstrument__559 *t2; + struct instruments_perf_PerfInstrument__559 *t0; + struct fifo_UnixPipe_Writer__600 *t3; + nav__746_56 t4; + nav__746_56 t5; + nav__746_56 t6; + struct shared_Command__struct_2013__2013 t7; + struct shared_Command__2009 t8; + struct fifo_UnixPipe_Reader__602 *t10; + uint16_t t9; + t0 = a0; + t1 = (struct instruments_perf_PerfInstrument__559 *const *)&t0; + t2 = (*t1); + t3 = (struct fifo_UnixPipe_Writer__600 *)&t2->writer; + t4 = mem_span__anon_3983__6231(a1); + t5 = mem_span__anon_3983__6231(a2); + memcpy(&t6, &t4, sizeof(nav__746_56)); + memcpy(&t4, &t5, sizeof(nav__746_56)); + t7.name = t6; + t7.version = t4; + t8.tag = UINT8_C(5); + t8.payload.SetIntegration = t7; + t9 = fifo_UnixPipe_Writer_sendCmd__1064(t3, t8); + if (t9) { + return t9; + } + t2 = (*t1); + t10 = (struct fifo_UnixPipe_Reader__602 *)&t2->reader; + t9 = fifo_UnixPipe_Reader_waitForAck__1071(t10, (nav__746_66){UINT64_C(0xaaaaaaaaaaaaaaaa),true}); + if (t9) { + return t9; + } + return 0; +} + +uint8_t c_instrument_hooks_set_integration__242(struct instruments_root_InstrumentHooks__547 *const a0, uint8_t const *const a1, uint8_t const *const a2) { + struct instruments_root_InstrumentHooks__547 *t1; + struct instruments_root_InstrumentHooks__547 *t2; 
+ struct instruments_root_InstrumentHooks__547 *t5; + struct instruments_root_InstrumentHooks__547 *const *t3; + struct instruments_root_InstrumentHooks__547 t6; + struct instruments_valgrind_ValgrindInstrument__554 *t8; + struct instruments_valgrind_ValgrindInstrument__554 t9; + struct instruments_valgrind_ValgrindInstrument__554 t11; + struct instruments_valgrind_ValgrindInstrument__554 const *t12; + struct mem_Allocator__565 t13; + nav__242_62 t14; + nav__242_67 t15; + nav__242_65 t17; + uint8_t *t18; + uint8_t const *t19; + struct mem_Allocator__565 const *t20; + struct instruments_perf_PerfInstrument__559 *t21; + uint16_t t4; + uint16_t t10; + uint16_t t16; + bool t0; + uint8_t t7; + t0 = a0 != NULL; + if (t0) { + t1 = a0; + t2 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t2; + t1 = (*t3); + t5 = t1; + t3 = (struct instruments_root_InstrumentHooks__547 *const *)&t5; + t6 = (*t1); + t7 = t6.tag; + switch (t7) { + case UINT8_C(0): { + t1 = (*t3); + t8 = (struct instruments_valgrind_ValgrindInstrument__554 *)&t1->payload.valgrind; + t9 = (*t8); + t11 = t9; + t12 = (struct instruments_valgrind_ValgrindInstrument__554 const *)&t11; + t13 = t9.allocator; + t14.f0 = a1; + t14.f1 = a2; + t15 = fmt_allocPrintZ__anon_4333__6607(t13, t14); + if (t15.error) { + t16 = t15.error; + t10 = t16; + goto zig_block_4; + } + t17 = t15.payload; + t18 = t17.ptr; + t19 = (uint8_t const *)t18; + callgrind_dump_stats_at(t19); + t20 = (struct mem_Allocator__565 const *)&t12->allocator; + t13 = (*t20); + mem_Allocator_free__anon_4335__6608(t13, t17); + t10 = 0; + goto zig_block_4; + + zig_block_4:; + memcpy(&t16, &t10, sizeof(uint16_t)); + if (t16) { + t4 = t16; + goto zig_block_2; + } + goto zig_block_3; + } + case UINT8_C(1): { + t1 = (*t3); + t21 = (struct instruments_perf_PerfInstrument__559 *)&t1->payload.perf; + t16 = instruments_perf_PerfInstrument_set_integration__746(t21, a1, a2); + if (t16) { + t4 = t16; + goto zig_block_2; + } + goto zig_block_3; + } + 
case UINT8_C(2): { + goto zig_block_3; + } + default: zig_unreachable(); + } + + zig_block_3:; + t4 = 0; + goto zig_block_2; + + zig_block_2:; + memcpy(&t16, &t4, sizeof(uint16_t)); + t0 = t16 == UINT16_C(0); + if (t0) { + goto zig_block_1; + } + return UINT8_C(1); + + zig_block_1:; + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + return UINT8_C(0); +} + +static nav__6607_40 fmt_allocPrintZ__anon_4333__6607(struct mem_Allocator__565 const a0, nav__6607_43 const a1) { + nav__6607_40 t0; + nav__6607_39 t2; + nav__6607_39 t3; + nav__6607_39 const *t4; + uintptr_t t5; + uint8_t *t6; + uint16_t t1; + t0 = fmt_allocPrint__anon_4360__6609(a0, a1); + if (t0.error) { + t1 = t0.error; + t0.payload = (nav__6607_39){(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t0.error = t1; + return t0; + } + t2 = t0.payload; + t3 = t2; + t4 = (nav__6607_39 const *)&t3; + t5 = t2.len; + t5 = t5 - (uintptr_t)1ul; + t2 = (*t4); + t6 = t2.ptr; + t6 = (uint8_t *)(((uintptr_t)t6) + ((uintptr_t)0ul*sizeof(uint8_t))); + t2.ptr = t6; + t2.len = t5; + t0.payload = t2; + t0.error = UINT16_C(0); + return t0; +} + +static void mem_Allocator_free__anon_4335__6608(struct mem_Allocator__565 const a0, nav__6608_40 const a1) { + struct mem_Allocator__565 const *t1; + nav__6608_40 t2; + uintptr_t t3; + uint64_t t4; + uint8_t *t6; + uint8_t *t7; + uint8_t *t8; + uint8_t *const *t9; + struct mem_Allocator__565 t10; + struct mem_Allocator__565 t0; + struct mem_Allocator__565 t12; + struct mem_Allocator_VTable__568 const *const *t13; + struct mem_Allocator_VTable__568 const *t14; + void (*const *t15)(void *, nav__6608_40, uint8_t, uintptr_t); + void (*t16)(void *, nav__6608_40, uint8_t, uintptr_t); + void *t17; + bool t5; + uint8_t t11; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t2 = mem_sliceAsBytes__anon_4367__6610(a1); + t3 = t2.len; + t3 = t3 + (uintptr_t)1ul; + t4 = t3; + t5 = t4 == UINT64_C(0); + if (t5) { + return; + } + goto zig_block_0; + 
+ zig_block_0:; + t6 = t2.ptr; + t7 = (uint8_t *)t6; + t8 = t7; + t9 = (uint8_t *const *)&t8; + t7 = (*t9); + t7 = (uint8_t *)(((uintptr_t)t7) + ((uintptr_t)0ul*sizeof(uint8_t))); + t2.ptr = t7; + t2.len = t3; + t10 = (*t1); + t7 = (*t9); + t7 = (uint8_t *)(((uintptr_t)t7) + ((uintptr_t)0ul*sizeof(uint8_t))); + t2.ptr = t7; + t2.len = t3; + t11 = mem_Alignment_fromByteUnits__1032((uintptr_t)1ul); + t3 = (uintptr_t)zig_return_address(); + t12 = t10; + t1 = (struct mem_Allocator__565 const *)&t12; + t13 = (struct mem_Allocator_VTable__568 const *const *)&t1->vtable; + t14 = (*t13); + t15 = (void (*const *)(void *, nav__6608_40, uint8_t, uintptr_t))&t14->free; + t16 = (*t15); + t17 = t10.ptr; + t16(t17, t2, t11, t3); + return; +} + +static nav__6609_40 fmt_allocPrint__anon_4360__6609(struct mem_Allocator__565 const a0, nav__6609_43 const a1) { + struct mem_Allocator__565 const *t1; + uintptr_t t2; + uintptr_t t6; + uint64_t t3; + nav__6609_54 t4; + struct mem_Allocator__565 t7; + struct mem_Allocator__565 t0; + nav__6609_40 t8; + nav__6609_40 t11; + nav__6609_39 t10; + uint16_t t9; + bool t5; + t0 = a0; + t1 = (struct mem_Allocator__565 const *)&t0; + t3 = fmt_count__anon_4374__6611(a1); + t4 = math_cast__anon_4376__6612(t3); + t5 = t4.is_null != true; + if (t5) { + t6 = t4.payload; + t2 = t6; + goto zig_block_0; + } + return (nav__6609_40){{(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul},zig_error_OutOfMemory}; + + zig_block_0:; + t7 = (*t1); + t8 = mem_Allocator_alloc__anon_2204__4173(t7, t2); + if (t8.error) { + t9 = t8.error; + t8.payload = (nav__6609_39){(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, (uintptr_t)0xaaaaaaaaaaaaaaaaul}; + t8.error = t9; + return t8; + } + t10 = t8.payload; + t8 = fmt_bufPrint__anon_4385__6613(t10, a1); + t5 = t8.error == UINT16_C(0); + if (t5) { + t10 = t8.payload; + t8.payload = t10; + t8.error = UINT16_C(0); + t11 = t8; + goto zig_block_1; + } + t9 = t8.error; + switch (t9) { + case zig_error_NoSpaceLeft: 
{ + zig_unreachable(); + } + default: zig_unreachable(); + } + + zig_block_1:; + return t11; +} + +static nav__6610_39 mem_sliceAsBytes__anon_4367__6610(nav__6610_39 const a0) { + uintptr_t t0; + uint64_t t1; + uint8_t *t3; + uint8_t *t4; + uint8_t *t5; + uint8_t *const *t6; + nav__6610_39 t7; + bool t2; + t0 = a0.len; + t1 = t0; + t2 = t1 == UINT64_C(0); + if (t2) { + goto zig_block_0; + } + goto zig_block_0; + + zig_block_0:; + t3 = a0.ptr; + t4 = (uint8_t *)t3; + t5 = t4; + t6 = (uint8_t *const *)&t5; + t0 = a0.len; + t4 = (*t6); + t4 = (uint8_t *)(((uintptr_t)t4) + ((uintptr_t)0ul*sizeof(uint8_t))); + t7.ptr = t4; + t7.len = t0; + return t7; +} + +static uint16_t fmt_format__anon_4442__6652(struct io_Writer__3718 const a0, nav__6652_40 const a1) { + struct io_Writer__3718 const *t1; + struct io_Writer__3718 t2; + struct io_Writer__3718 t0; + uint8_t const *t4; + uint16_t t3; + t0 = a0; + t1 = (struct io_Writer__3718 const *)&t0; + t2 = (*t1); + t3 = io_Writer_writeAll__6127(t2, (nav__6652_44){(uint8_t const *)&__anon_4497,(uintptr_t)10ul}); + if (t3) { + return t3; + } + t4 = a1.f0; + t3 = fmt_formatType__anon_4770__6820(t4, (struct fmt_FormatOptions__4756){{(uintptr_t)0xaaaaaaaaaaaaaaaaul,true},{(uintptr_t)0xaaaaaaaaaaaaaaaaul,true},UINT32_C(32),UINT8_C(2)}, a0, (uintptr_t)3ul); + if (t3) { + return t3; + } + t2 = (*t1); + t3 = io_Writer_writeAll__6127(t2, (nav__6652_44){(uint8_t const *)&__anon_4778,(uintptr_t)1ul}); + if (t3) { + return t3; + } + t4 = a1.f1; + t3 = fmt_formatType__anon_4770__6820(t4, (struct fmt_FormatOptions__4756){{(uintptr_t)0xaaaaaaaaaaaaaaaaul,true},{(uintptr_t)0xaaaaaaaaaaaaaaaaul,true},UINT32_C(32),UINT8_C(2)}, a0, (uintptr_t)3ul); + if (t3) { + return t3; + } + t2 = (*t1); + t3 = io_Writer_writeAll__6127(t2, (nav__6652_44){(uint8_t const *)&__anon_4798,(uintptr_t)1ul}); + if (t3) { + return t3; + } + return 0; +} + +static uint64_t fmt_count__anon_4374__6611(nav__6611_39 const a0) { + struct 
io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 t1; + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 t0; + struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 t2; + struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 t3; + struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 const *t4; + struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 const *t5; + struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 const *const *t6; + void const **t8; + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const *t9; + void const *t10; + nav__6611_53 (**t11)(void const *, nav__6611_54); + struct io_Writer__3718 t12; + struct io_Writer__3718 t7; + uint64_t t15; + uint16_t t13; + bool t14; + t1 = io_counting_writer_countingWriter__anon_4406__6638(); + t0 = t1; + t2 = io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_writer__6637(&t0); + t3 = t2; + t4 = (struct 
io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 const *)&t3; + t5 = t4; + t6 = (struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 const *const *)&t5; + t8 = (void const **)&t7.context; + t4 = (*t6); + t9 = (struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const *)&t4->context; + t10 = (void const *)t9; + (*t8) = t10; + t11 = (nav__6611_53 (**)(void const *, nav__6611_54))&t7.writeFn; + (*t11) = &io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWr__6651; + t12 = t7; + t13 = fmt_format__anon_4442__6652(t12, a0); + t14 = t13 == UINT16_C(0); + if (t14) { + goto zig_block_0; + } + zig_unreachable(); + + zig_block_0:; + t1 = t0; + t15 = t1.bytes_written; + return t15; +} + +static nav__6612_38 math_cast__anon_4376__6612(uint64_t const a0) { + uintptr_t t0; + nav__6612_38 t1; + t0 = a0; + t1.is_null = false; + t1.payload = t0; + return t1; +} + +static nav__6613_40 fmt_bufPrint__anon_4385__6613(nav__6613_39 const a0, nav__6613_42 const a1) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 t1; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 t0; + struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 t2; + struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 t3; + struct 
io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 const *t4; + struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 const *t5; + struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 const *const *t6; + void const **t8; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t9; + void const *t10; + nav__6613_57 (**t11)(void const *, nav__6613_58); + struct io_Writer__3718 t12; + struct io_Writer__3718 t7; + nav__6613_39 t15; + nav__6613_40 t16; + uint16_t t13; + bool t14; + t1 = io_fixed_buffer_stream_fixedBufferStream__anon_2226__4205(a0); + t0 = t1; + t2 = io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_writer__4195(&t0); + t3 = t2; + t4 = (struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 const *)&t3; + t5 = t4; + t6 = (struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 const *const *)&t5; + t8 = (void const **)&t7.context; + t4 = (*t6); + t9 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)&t4->context; + t10 = (void const *)t9; + (*t8) = t10; + t11 = (nav__6613_57 (**)(void const *, nav__6613_58))&t7.writeFn; + (*t11) = &io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write__6833; + t12 = t7; + t13 = fmt_format__anon_4442__6652(t12, a1); + t14 = t13 == UINT16_C(0); + if (t14) { + goto zig_block_0; + } + switch (t13) { + case zig_error_NoSpaceLeft: { + return (nav__6613_40){{(uint8_t *)(uintptr_t)0xaaaaaaaaaaaaaaaaul, 
(uintptr_t)0xaaaaaaaaaaaaaaaaul},zig_error_NoSpaceLeft}; + } + default: { + zig_unreachable(); + } + } + + zig_block_0:; + t1 = t0; + t15 = io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_getWritten__4203(t1); + t16.payload = t15; + t16.error = UINT16_C(0); + return t16; +} + +static struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 io_counting_writer_countingWriter__anon_4406__6638(void) { + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 t0; + t0 = (struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403){UINT64_C(0)}; + return t0; +} + +static struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_writer__6637(struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const a0) { + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 **t1; + struct io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_2cerror_7b_7d_2c_28function_20_27write_27_29_29__4423 t0; + t1 = (struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 **)&t0.context; + (*t1) = a0; + return t0; +} + +static nav__6651_38 io_GenericWriter_28_2aio_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWr__6651(void const *const a0, nav__6651_41 const a1) { + struct 
io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const *t0; + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *t1; + nav__6651_38 t2; + nav__6651_38 t3; + t0 = (struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const *)a0; + t1 = (*t0); + t2 = io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_write__6636(t1, a1); + memcpy(&t3, &t2, sizeof(nav__6651_38)); + return t3; +} + +static uint16_t fmt_formatType__anon_4770__6820(uint8_t const *const a0, struct fmt_FormatOptions__4756 const a1, struct io_Writer__3718 const a2, uintptr_t const a3) { + nav__6820_47 t2; + nav__6820_47 t3; + uint8_t const *t0; + struct io_Writer__3718 t1; + uint16_t t4; + uint16_t t5; + (void)a3; + t0 = a0; + t1 = a2; + t2 = mem_span__anon_3983__6231(a0); + memcpy(&t3, &t2, sizeof(nav__6820_47)); + t4 = fmt_formatBuf__anon_4870__6834(t3, a1, a2); + memcpy(&t5, &t4, sizeof(uint16_t)); + return t5; +} + +static struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_writer__4195(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const a0) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 **t1; + struct io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write_27_29_29__4814 t0; + t1 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 **)&t0.context; + (*t1) = a0; + return t0; +} + +static nav__6833_38 
io_GenericWriter_28_2aio_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_2cerror_7bNoSpaceLeft_7d_2c_28function_20_27write__6833(void const *const a0, nav__6833_41 const a1) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t0; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *t1; + nav__6833_38 t2; + nav__6833_38 t3; + t0 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)a0; + t1 = (*t0); + t2 = io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_write__4198(t1, a1); + memcpy(&t3, &t2, sizeof(nav__6833_38)); + return t3; +} + +static nav__4203_39 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_getWritten__4203(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 const a0) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 const *t1; + nav__4203_39 const *t2; + uintptr_t t3; + nav__4203_39 t4; + uint8_t *t5; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 t0; + t0 = a0; + t1 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 const *)&t0; + t2 = (nav__4203_39 const *)&t1->buffer; + t3 = a0.pos; + t4 = (*t2); + t5 = t4.ptr; + t5 = (uint8_t *)(((uintptr_t)t5) + ((uintptr_t)0ul*sizeof(uint8_t))); + t4.ptr = t5; + t4.len = t3; + return t4; +} + +static nav__6636_38 io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29_write__6636(struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const a0, nav__6636_42 const a1) { + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const *t1; + nav__6636_38 t2; + uintptr_t t3; + struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *t4; + struct 
io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *t0; + uint64_t *t5; + uint64_t t6; + uint64_t t7; + t0 = a0; + t1 = (struct io_counting_writer_CountingWriter_28io_GenericWriter_28void_2cerror_7b_7d_2c_28function_20_27dummyWrite_27_29_29_29__4403 *const *)&t0; + t2 = io_dummyWrite__4108(a1); + t3 = t2.payload; + t4 = (*t1); + t5 = (uint64_t *)&t4->bytes_written; + t6 = (*t5); + t7 = t3; + t7 = t6 + t7; + (*t5) = t7; + t2.payload = t3; + t2.error = UINT16_C(0); + return t2; +} + +static nav__6664_38 unicode_utf8ByteSequenceLength__6664(uint8_t const a0) { + nav__6664_38 t0; + switch (a0) { + default: if ((a0 >= UINT8_C(0) && a0 <= UINT8_C(127))) { + t0 = (nav__6664_38){0,UINT8_C(1)}; + goto zig_block_0; + }if ((a0 >= UINT8_C(192) && a0 <= UINT8_C(223))) { + t0 = (nav__6664_38){0,UINT8_C(2)}; + goto zig_block_0; + }if ((a0 >= UINT8_C(224) && a0 <= UINT8_C(239))) { + t0 = (nav__6664_38){0,UINT8_C(3)}; + goto zig_block_0; + }if ((a0 >= UINT8_C(240) && a0 <= UINT8_C(247))) { + t0 = (nav__6664_38){0,UINT8_C(4)}; + goto zig_block_0; + }{ + t0 = (nav__6664_38){zig_error_Utf8InvalidStartByte,UINT8_C(0x2)}; + goto zig_block_0; + } + } + + zig_block_0:; + return t0; +} + +static nav__6680_38 unicode_utf8CountCodepoints__6680(nav__6680_40 const a0) { + nav__6680_40 const *t1; + uintptr_t t4; + uintptr_t t5; + uintptr_t t2; + uintptr_t t3; + uint64_t t6; + uint64_t t7; + nav__6680_40 t9; + nav__6680_40 t0; + uint8_t const *t10; + uint8_t const (*t11)[8]; + nav__6680_38 t16; + nav__6680_50 t17; + nav__6680_48 t14; + uint16_t t15; + bool t8; + uint8_t t12[8]; + uint8_t t13; + t0 = a0; + t1 = (nav__6680_40 const *)&t0; + t2 = (uintptr_t)0ul; + t3 = (uintptr_t)0ul; + zig_loop_9: + t4 = t3; + t5 = a0.len; + t6 = t4; + t7 = t5; + t8 = t6 < t7; + if (t8) { + zig_loop_18: + t5 = t3; + t5 = t5 + (uintptr_t)8ul; + t4 = a0.len; + t7 = t5; + t6 = t4; + t8 = t7 <= t6; + if (t8) { + t4 = t3; + t9 = (*t1); + t10 = 
t9.ptr; + t10 = (uint8_t const *)(((uintptr_t)t10) + (t4*sizeof(uint8_t))); + t11 = (uint8_t const (*)[8])t10; + memcpy(t12, (const char *)t11, sizeof(uint8_t[8])); + memcpy(&t4, &t12, sizeof(uintptr_t)); + t4 = zig_wrap_u64(t4, UINT8_C(64)); + t4 = t4 & (uintptr_t)9259542123273814144ul; + t6 = t4; + t8 = t6 != UINT64_C(0); + if (t8) { + goto zig_block_2; + } + goto zig_block_4; + + zig_block_4:; + t4 = t2; + t4 = t4 + (uintptr_t)8ul; + t2 = t4; + t4 = t3; + t4 = t4 + (uintptr_t)8ul; + t3 = t4; + goto zig_block_3; + } + goto zig_block_2; + + zig_block_3:; + goto zig_loop_18; + + zig_block_2:; + t5 = t3; + t4 = a0.len; + t7 = t5; + t6 = t4; + t8 = t7 < t6; + if (t8) { + t4 = t3; + t13 = a0.ptr[t4]; + t14 = unicode_utf8ByteSequenceLength__6664(t13); + if (t14.error) { + t15 = t14.error; + t16.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t16.error = t15; + return t16; + } + t13 = t14.payload; + t4 = t3; + t5 = (uintptr_t)t13; + t5 = t4 + t5; + t4 = a0.len; + t6 = t5; + t7 = t4; + t8 = t6 > t7; + if (t8) { + return (nav__6680_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_TruncatedInput}; + } + goto zig_block_6; + + zig_block_6:; + switch (t13) { + case UINT8_C(1): { + goto zig_block_7; + } + default: { + t4 = t3; + t9 = (*t1); + t10 = t9.ptr; + t10 = (uint8_t const *)(((uintptr_t)t10) + (t4*sizeof(uint8_t))); + t4 = (uintptr_t)t13; + t9.ptr = t10; + t9.len = t4; + t17 = unicode_utf8Decode__6670(t9); + if (t17.error) { + t15 = t17.error; + t16.payload = (uintptr_t)0xaaaaaaaaaaaaaaaaul; + t16.error = t15; + return t16; + } + goto zig_block_7; + } + } + + zig_block_7:; + t4 = t3; + t5 = (uintptr_t)t13; + t5 = t4 + t5; + t3 = t5; + t5 = t2; + t5 = t5 + (uintptr_t)1ul; + t2 = t5; + goto zig_block_5; + } + goto zig_block_5; + + zig_block_5:; + goto zig_block_1; + } + goto zig_block_0; + + zig_block_1:; + goto zig_loop_9; + + zig_block_0:; + t5 = t2; + t16.payload = t5; + t16.error = UINT16_C(0); + return t16; +} + +static uint16_t fmt_formatBuf__anon_4870__6834(nav__6834_39 
const a0, struct fmt_FormatOptions__4756 const a1, struct io_Writer__3718 const a2) { + struct io_Writer__3718 const *t1; + nav__6834_44 t2; + uintptr_t t4; + uintptr_t t5; + uintptr_t t7; + nav__6834_48 t6; + uint64_t t8; + uint64_t t9; + struct io_Writer__3718 t10; + struct io_Writer__3718 t0; + nav__6834_57 t15; + nav__6834_39 t17; + nav__6834_39 t20; + uint8_t *t19; + uint32_t t14; + uint16_t t11; + uint16_t t12; + nav__6834_60 t16; + bool t3; + uint8_t t18; + uint8_t t13[4]; + t0 = a2; + t1 = (struct io_Writer__3718 const *)&t0; + t2 = a1.width; + t3 = t2.is_null != true; + if (t3) { + t4 = t2.payload; + t6 = unicode_utf8CountCodepoints__6680(a0); + t3 = t6.error == UINT16_C(0); + if (t3) { + t7 = t6.payload; + t5 = t7; + goto zig_block_1; + } + t7 = a0.len; + t5 = t7; + goto zig_block_1; + + zig_block_1:; + t8 = t5; + t9 = t4; + t3 = t8 < t9; + if (t3) { + t5 = t4 - t5; + t7 = t5; + goto zig_block_2; + } + t7 = (uintptr_t)0ul; + goto zig_block_2; + + zig_block_2:; + t9 = t7; + t3 = t9 == UINT64_C(0); + if (t3) { + t10 = (*t1); + t11 = io_Writer_writeAll__6127(t10, a0); + memcpy(&t12, &t11, sizeof(uint16_t)); + return t12; + } + goto zig_block_3; + + zig_block_3:; + t14 = a1.fill; + t15.ptr = &t13[(uintptr_t)0ul]; + t15.len = (uintptr_t)4ul; + t16 = unicode_utf8Encode__6665(t14, t15); + t3 = t16.error == UINT16_C(0); + if (t3) { + t18 = t16.payload; + t19 = (uint8_t *)&t13; + t19 = (uint8_t *)(((uintptr_t)t19) + ((uintptr_t)0ul*sizeof(uint8_t))); + t5 = (uintptr_t)t18; + t15.ptr = t19; + t15.len = t5; + memcpy(&t20, &t15, sizeof(nav__6834_39)); + t17 = t20; + goto zig_block_4; + + } + t12 = t16.error; + switch (t12) { + case zig_error_Utf8CannotEncodeSurrogateHalf: + case zig_error_CodepointTooLarge: { + t17 = (nav__6834_39){(uint8_t const *)&__anon_4969,(uintptr_t)3ul}; + goto zig_block_4; + } + default: zig_unreachable(); + } + + zig_block_4:; + t18 = a1.alignment; + switch (t18) { + case UINT8_C(0): { + t10 = (*t1); + t12 = io_Writer_writeAll__6127(t10, 
a0); + if (t12) { + return t12; + } + t10 = (*t1); + t12 = io_Writer_writeBytesNTimes__6131(t10, t17, t7); + if (t12) { + return t12; + } + goto zig_block_6; + } + case UINT8_C(1): { + t5 = t7 / (uintptr_t)2ul; + t7 = t7 + (uintptr_t)1ul; + t7 = t7 / (uintptr_t)2ul; + t10 = (*t1); + t12 = io_Writer_writeBytesNTimes__6131(t10, t17, t5); + if (t12) { + return t12; + } + t10 = (*t1); + t12 = io_Writer_writeAll__6127(t10, a0); + if (t12) { + return t12; + } + t10 = (*t1); + t12 = io_Writer_writeBytesNTimes__6131(t10, t17, t7); + if (t12) { + return t12; + } + goto zig_block_6; + } + case UINT8_C(2): { + t10 = (*t1); + t12 = io_Writer_writeBytesNTimes__6131(t10, t17, t7); + if (t12) { + return t12; + } + t10 = (*t1); + t12 = io_Writer_writeAll__6127(t10, a0); + if (t12) { + return t12; + } + goto zig_block_6; + } + default: zig_unreachable(); + } + + zig_block_6:; + goto zig_block_0; + } + t10 = (*t1); + t12 = io_Writer_writeAll__6127(t10, a0); + if (t12) { + return t12; + } + goto zig_block_0; + + zig_block_0:; + return 0; +} + +static nav__4198_38 io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29_write__4198(struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const a0, nav__4198_42 const a1) { + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *t1; + nav__4198_42 const *t3; + uintptr_t t4; + uintptr_t t10; + uint64_t t5; + uint64_t t11; + uintptr_t *t7; + nav__4198_50 *t8; + nav__4198_50 t9; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *t12; + struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *t0; + uint8_t *t13; + nav__4198_42 t14; + nav__4198_42 t2; + uint8_t const *t15; + nav__4198_38 t16; + bool t6; + t0 = a0; + t1 = (struct io_fixed_buffer_stream_FixedBufferStream_28_5b_5du8_29__2223 *const *)&t0; + t2 = a1; + t3 = (nav__4198_42 const *)&t2; + t4 = a1.len; + t5 = t4; + t6 = t5 == UINT64_C(0); + if (t6) { + return (nav__4198_38){(uintptr_t)0ul,0}; + } + goto zig_block_0; + + 
zig_block_0:; + t7 = (uintptr_t *)&a0->pos; + t4 = (*t7); + t8 = (nav__4198_50 *)&a0->buffer; + t9 = (*t8); + t10 = t9.len; + t5 = t4; + t11 = t10; + t6 = t5 >= t11; + if (t6) { + return (nav__4198_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_NoSpaceLeft}; + } + goto zig_block_1; + + zig_block_1:; + t8 = (nav__4198_50 *)&a0->buffer; + t9 = (*t8); + t10 = t9.len; + t7 = (uintptr_t *)&a0->pos; + t4 = (*t7); + t4 = t10 - t4; + t10 = a1.len; + t10 = (t4 < t10) ? t4 : t10; + t11 = t10; + t12 = (*t1); + t8 = (nav__4198_50 *)&t12->buffer; + t7 = (uintptr_t *)&a0->pos; + t10 = (*t7); + t9 = (*t8); + t13 = t9.ptr; + t13 = (uint8_t *)(((uintptr_t)t13) + (t10*sizeof(uint8_t))); + t10 = t11; + t9.ptr = t13; + t9.len = t10; + t14 = (*t3); + t15 = t14.ptr; + t15 = (uint8_t const *)(((uintptr_t)t15) + ((uintptr_t)0ul*sizeof(uint8_t))); + t10 = t11; + t14.ptr = t15; + t14.len = t10; + t15 = t14.ptr; + if (t9.len != 0) memcpy(t9.ptr, t15, t9.len * sizeof(uint8_t)); + t12 = (*t1); + t7 = (uintptr_t *)&t12->pos; + t10 = (*t7); + t4 = t11; + t4 = t10 + t4; + (*t7) = t4; + t6 = t11 == UINT64_C(0); + if (t6) { + return (nav__4198_38){(uintptr_t)0xaaaaaaaaaaaaaaaaul,zig_error_NoSpaceLeft}; + } + goto zig_block_2; + + zig_block_2:; + t4 = t11; + t16.payload = t4; + t16.error = UINT16_C(0); + return t16; +} + +static nav__4108_38 io_dummyWrite__4108(nav__4108_40 const a0) { + uintptr_t t0; + nav__4108_38 t1; + t0 = a0.len; + t1.payload = t0; + t1.error = UINT16_C(0); + return t1; +} + +static nav__6670_38 unicode_utf8Decode__6670(nav__6670_40 const a0) { + nav__6670_40 const *t1; + uintptr_t t2; + nav__6670_40 t7; + nav__6670_40 t0; + uint8_t const *t8; + uint8_t const (*t9)[2]; + uint8_t const (*t13)[3]; + uint8_t const (*t16)[4]; + nav__6670_38 t3; + nav__6670_38 t6; + nav__6670_38 t12; + uint32_t t5; + uint8_t t4; + uint8_t t10[2]; + nav__6670_48 t11; + uint8_t t14[3]; + nav__6670_52 t15; + uint8_t t17[4]; + nav__6670_56 t18; + t0 = a0; + t1 = (nav__6670_40 const *)&t0; + t2 = a0.len; 
+ switch (t2) { + case (uintptr_t)1ul: { + t4 = a0.ptr[(uintptr_t)0ul]; + t5 = (uint32_t)t4; + t6.payload = t5; + t6.error = UINT16_C(0); + t3 = t6; + goto zig_block_0; + } + case (uintptr_t)2ul: { + t7 = (*t1); + t8 = t7.ptr; + t8 = (uint8_t const *)(((uintptr_t)t8) + ((uintptr_t)0ul*sizeof(uint8_t))); + t9 = (uint8_t const (*)[2])t8; + memcpy(t10, (const char *)t9, sizeof(uint8_t[2])); + memcpy(t11.array, t10, sizeof(nav__6670_48)); + t6 = unicode_utf8Decode2__6672(t11); + memcpy(&t12, &t6, sizeof(nav__6670_38)); + t3 = t12; + goto zig_block_0; + } + case (uintptr_t)3ul: { + t7 = (*t1); + t8 = t7.ptr; + t8 = (uint8_t const *)(((uintptr_t)t8) + ((uintptr_t)0ul*sizeof(uint8_t))); + t13 = (uint8_t const (*)[3])t8; + memcpy(t14, (const char *)t13, sizeof(uint8_t[3])); + memcpy(t15.array, t14, sizeof(nav__6670_52)); + t12 = unicode_utf8Decode3__6674(t15); + memcpy(&t6, &t12, sizeof(nav__6670_38)); + t3 = t6; + goto zig_block_0; + } + case (uintptr_t)4ul: { + t7 = (*t1); + t8 = t7.ptr; + t8 = (uint8_t const *)(((uintptr_t)t8) + ((uintptr_t)0ul*sizeof(uint8_t))); + t16 = (uint8_t const (*)[4])t8; + memcpy(t17, (const char *)t16, sizeof(uint8_t[4])); + memcpy(t18.array, t17, sizeof(nav__6670_56)); + t12 = unicode_utf8Decode4__6678(t18); + memcpy(&t6, &t12, sizeof(nav__6670_38)); + t3 = t6; + goto zig_block_0; + } + default: { + zig_unreachable(); + } + } + + zig_block_0:; + return t3; +} + +static nav__6663_38 unicode_utf8CodepointSequenceLength__6663(uint32_t const a0) { + bool t0; + t0 = a0 < UINT32_C(128); + if (t0) { + return (nav__6663_38){0,UINT8_C(1)}; + } + goto zig_block_0; + + zig_block_0:; + t0 = a0 < UINT32_C(2048); + if (t0) { + return (nav__6663_38){0,UINT8_C(2)}; + } + goto zig_block_1; + + zig_block_1:; + t0 = a0 < UINT32_C(65536); + if (t0) { + return (nav__6663_38){0,UINT8_C(3)}; + } + goto zig_block_2; + + zig_block_2:; + t0 = a0 < UINT32_C(1114112); + if (t0) { + return (nav__6663_38){0,UINT8_C(4)}; + } + goto zig_block_3; + + zig_block_3:; + return 
(nav__6663_38){zig_error_CodepointTooLarge,UINT8_C(0x2)}; +} + +static nav__6835_38 unicode_utf8EncodeImpl__anon_5001__6835(uint32_t const a0, nav__6835_40 const a1) { + nav__6835_40 const *t1; + uintptr_t t5; + uint64_t t6; + uint64_t t7; + nav__6835_40 t9; + nav__6835_40 t0; + uint8_t *t10; + uint32_t t12; + nav__6835_38 t2; + uint16_t t3; + uint8_t t4; + uint8_t t11; + bool t8; + t0 = a1; + t1 = (nav__6835_40 const *)&t0; + t2 = unicode_utf8CodepointSequenceLength__6663(a0); + if (t2.error) { + t3 = t2.error; + t2.payload = UINT8_C(0x2); + t2.error = t3; + return t2; + } + t4 = t2.payload; + t5 = a1.len; + t6 = t5; + t7 = (uint64_t)t4; + t8 = t6 >= t7; + debug_assert__180(t8); + switch (t4) { + case UINT8_C(1): { + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)0ul]; + t11 = (uint8_t)a0; + (*t10) = t11; + goto zig_block_0; + } + case UINT8_C(2): { + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)0ul]; + t12 = zig_shr_u32(a0, UINT8_C(6)); + t12 = UINT32_C(192) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)1ul]; + t12 = a0 & UINT32_C(63); + t12 = UINT32_C(128) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + goto zig_block_0; + } + case UINT8_C(3): { + t8 = unicode_isSurrogateCodepoint__6743(a0); + if (t8) { + return (nav__6835_38){zig_error_Utf8CannotEncodeSurrogateHalf,UINT8_C(0x2)}; + } + goto zig_block_1; + + zig_block_1:; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)0ul]; + t12 = zig_shr_u32(a0, UINT8_C(12)); + t12 = UINT32_C(224) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)1ul]; + t12 = zig_shr_u32(a0, UINT8_C(6)); + t12 = t12 & UINT32_C(63); + t12 = UINT32_C(128) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)2ul]; + t12 = a0 & UINT32_C(63); + t12 = UINT32_C(128) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + goto zig_block_0; + } + case UINT8_C(4): { + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)0ul]; + t12 = zig_shr_u32(a0, UINT8_C(18)); + t12 = UINT32_C(240) | 
t12; + t11 = (uint8_t)t12; + (*t10) = t11; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)1ul]; + t12 = zig_shr_u32(a0, UINT8_C(12)); + t12 = t12 & UINT32_C(63); + t12 = UINT32_C(128) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)2ul]; + t12 = zig_shr_u32(a0, UINT8_C(6)); + t12 = t12 & UINT32_C(63); + t12 = UINT32_C(128) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + t9 = (*t1); + t10 = &t9.ptr[(uintptr_t)3ul]; + t12 = a0 & UINT32_C(63); + t12 = UINT32_C(128) | t12; + t11 = (uint8_t)t12; + (*t10) = t11; + goto zig_block_0; + } + default: { + zig_unreachable(); + } + } + + zig_block_0:; + t2.payload = t4; + t2.error = UINT16_C(0); + return t2; +} + +static nav__6665_38 unicode_utf8Encode__6665(uint32_t const a0, nav__6665_40 const a1) { + nav__6665_38 t0; + nav__6665_38 t1; + t0 = unicode_utf8EncodeImpl__anon_5001__6835(a0, a1); + memcpy(&t1, &t0, sizeof(nav__6665_38)); + return t1; +} + +static uint16_t io_Writer_writeBytesNTimes__6131(struct io_Writer__3718 const a0, nav__6131_40 const a1, uintptr_t const a2) { + struct io_Writer__3718 const *t1; + uintptr_t t3; + uintptr_t t2; + uint64_t t4; + uint64_t t5; + struct io_Writer__3718 t7; + struct io_Writer__3718 t0; + uint16_t t8; + bool t6; + t0 = a0; + t1 = (struct io_Writer__3718 const *)&t0; + t2 = (uintptr_t)0ul; + zig_loop_9: + t3 = t2; + t4 = t3; + t5 = a2; + t6 = t4 < t5; + if (t6) { + t7 = (*t1); + t8 = io_Writer_writeAll__6127(t7, a1); + if (t8) { + return t8; + } + t3 = t2; + t3 = t3 + (uintptr_t)1ul; + t2 = t3; + goto zig_block_1; + } + goto zig_block_0; + + zig_block_1:; + goto zig_loop_9; + + zig_block_0:; + return 0; +} + +static nav__6672_38 unicode_utf8Decode2__6672(nav__6672_40 const a0) { + uint32_t t3; + uint32_t t4; + uint32_t t2; + nav__6672_38 t5; + uint8_t t0; + bool t1; + t0 = a0.array[(uintptr_t)0ul]; + t0 = t0 & UINT8_C(224); + t1 = t0 == UINT8_C(192); + debug_assert__180(t1); + t0 = a0.array[(uintptr_t)0ul]; + t0 = t0 & UINT8_C(31); + t3 = (uint32_t)t0; + 
t2 = t3; + t0 = a0.array[(uintptr_t)1ul]; + t0 = t0 & UINT8_C(192); + t1 = t0 != UINT8_C(128); + if (t1) { + return (nav__6672_38){UINT32_C(0xaaaaa),zig_error_Utf8ExpectedContinuation}; + } + goto zig_block_0; + + zig_block_0:; + t3 = t2; + t3 = zig_shlw_u32(t3, UINT8_C(6), UINT8_C(21)); + t2 = t3; + t3 = t2; + t0 = a0.array[(uintptr_t)1ul]; + t0 = t0 & UINT8_C(63); + t4 = (uint32_t)t0; + t4 = t3 | t4; + t2 = t4; + t4 = t2; + t1 = t4 < UINT32_C(128); + if (t1) { + return (nav__6672_38){UINT32_C(0xaaaaa),zig_error_Utf8OverlongEncoding}; + } + goto zig_block_1; + + zig_block_1:; + t4 = t2; + t5.payload = t4; + t5.error = UINT16_C(0); + return t5; +} + +static nav__6674_38 unicode_utf8Decode3__6674(nav__6674_40 const a0) { + nav__6674_38 t1; + uint32_t t3; + uint16_t t2; + nav__6674_40 t0; + bool t4; + bool t5; + memcpy(t0.array, a0.array, sizeof(nav__6674_40)); + t1 = unicode_utf8Decode3AllowSurrogateHalf__6676(t0); + if (t1.error) { + t2 = t1.error; + t1.payload = UINT32_C(0xaaaaa); + t1.error = t2; + return t1; + } + t3 = t1.payload; + t4 = UINT32_C(55296) <= t3; + if (t4) { + t4 = t3 <= UINT32_C(57343); + t5 = t4; + goto zig_block_1; + } + t5 = false; + goto zig_block_1; + + zig_block_1:; + if (t5) { + return (nav__6674_38){UINT32_C(0xaaaaa),zig_error_Utf8EncodesSurrogateHalf}; + } + goto zig_block_0; + + zig_block_0:; + t1.payload = t3; + t1.error = UINT16_C(0); + return t1; +} + +static nav__6678_38 unicode_utf8Decode4__6678(nav__6678_40 const a0) { + uint32_t t3; + uint32_t t4; + uint32_t t2; + nav__6678_38 t5; + uint8_t t0; + bool t1; + t0 = a0.array[(uintptr_t)0ul]; + t0 = t0 & UINT8_C(248); + t1 = t0 == UINT8_C(240); + debug_assert__180(t1); + t0 = a0.array[(uintptr_t)0ul]; + t0 = t0 & UINT8_C(7); + t3 = (uint32_t)t0; + t2 = t3; + t0 = a0.array[(uintptr_t)1ul]; + t0 = t0 & UINT8_C(192); + t1 = t0 != UINT8_C(128); + if (t1) { + return (nav__6678_38){UINT32_C(0xaaaaa),zig_error_Utf8ExpectedContinuation}; + } + goto zig_block_0; + + zig_block_0:; + t3 = t2; + 
t3 = zig_shlw_u32(t3, UINT8_C(6), UINT8_C(21)); + t2 = t3; + t3 = t2; + t0 = a0.array[(uintptr_t)1ul]; + t0 = t0 & UINT8_C(63); + t4 = (uint32_t)t0; + t4 = t3 | t4; + t2 = t4; + t0 = a0.array[(uintptr_t)2ul]; + t0 = t0 & UINT8_C(192); + t1 = t0 != UINT8_C(128); + if (t1) { + return (nav__6678_38){UINT32_C(0xaaaaa),zig_error_Utf8ExpectedContinuation}; + } + goto zig_block_1; + + zig_block_1:; + t4 = t2; + t4 = zig_shlw_u32(t4, UINT8_C(6), UINT8_C(21)); + t2 = t4; + t4 = t2; + t0 = a0.array[(uintptr_t)2ul]; + t0 = t0 & UINT8_C(63); + t3 = (uint32_t)t0; + t3 = t4 | t3; + t2 = t3; + t0 = a0.array[(uintptr_t)3ul]; + t0 = t0 & UINT8_C(192); + t1 = t0 != UINT8_C(128); + if (t1) { + return (nav__6678_38){UINT32_C(0xaaaaa),zig_error_Utf8ExpectedContinuation}; + } + goto zig_block_2; + + zig_block_2:; + t3 = t2; + t3 = zig_shlw_u32(t3, UINT8_C(6), UINT8_C(21)); + t2 = t3; + t3 = t2; + t0 = a0.array[(uintptr_t)3ul]; + t0 = t0 & UINT8_C(63); + t4 = (uint32_t)t0; + t4 = t3 | t4; + t2 = t4; + t4 = t2; + t1 = t4 < UINT32_C(65536); + if (t1) { + return (nav__6678_38){UINT32_C(0xaaaaa),zig_error_Utf8OverlongEncoding}; + } + goto zig_block_3; + + zig_block_3:; + t4 = t2; + t1 = t4 > UINT32_C(1114111); + if (t1) { + return (nav__6678_38){UINT32_C(0xaaaaa),zig_error_Utf8CodepointTooLarge}; + } + goto zig_block_4; + + zig_block_4:; + t4 = t2; + t5.payload = t4; + t5.error = UINT16_C(0); + return t5; +} + +static bool unicode_isSurrogateCodepoint__6743(uint32_t const a0) { + bool t0; + switch (a0) { + default: if ((a0 >= UINT32_C(55296) && a0 <= UINT32_C(57343))) { + t0 = true; + goto zig_block_0; + }{ + t0 = false; + goto zig_block_0; + } + } + + zig_block_0:; + return t0; +} + +static nav__6676_38 unicode_utf8Decode3AllowSurrogateHalf__6676(nav__6676_40 const a0) { + uint32_t t3; + uint32_t t4; + uint32_t t2; + nav__6676_38 t5; + uint8_t t0; + bool t1; + t0 = a0.array[(uintptr_t)0ul]; + t0 = t0 & UINT8_C(240); + t1 = t0 == UINT8_C(224); + debug_assert__180(t1); + t0 = 
a0.array[(uintptr_t)0ul]; + t0 = t0 & UINT8_C(15); + t3 = (uint32_t)t0; + t2 = t3; + t0 = a0.array[(uintptr_t)1ul]; + t0 = t0 & UINT8_C(192); + t1 = t0 != UINT8_C(128); + if (t1) { + return (nav__6676_38){UINT32_C(0xaaaaa),zig_error_Utf8ExpectedContinuation}; + } + goto zig_block_0; + + zig_block_0:; + t3 = t2; + t3 = zig_shlw_u32(t3, UINT8_C(6), UINT8_C(21)); + t2 = t3; + t3 = t2; + t0 = a0.array[(uintptr_t)1ul]; + t0 = t0 & UINT8_C(63); + t4 = (uint32_t)t0; + t4 = t3 | t4; + t2 = t4; + t0 = a0.array[(uintptr_t)2ul]; + t0 = t0 & UINT8_C(192); + t1 = t0 != UINT8_C(128); + if (t1) { + return (nav__6676_38){UINT32_C(0xaaaaa),zig_error_Utf8ExpectedContinuation}; + } + goto zig_block_1; + + zig_block_1:; + t4 = t2; + t4 = zig_shlw_u32(t4, UINT8_C(6), UINT8_C(21)); + t2 = t4; + t4 = t2; + t0 = a0.array[(uintptr_t)2ul]; + t0 = t0 & UINT8_C(63); + t3 = (uint32_t)t0; + t3 = t4 | t3; + t2 = t3; + t3 = t2; + t1 = t3 < UINT32_C(2048); + if (t1) { + return (nav__6676_38){UINT32_C(0xaaaaa),zig_error_Utf8OverlongEncoding}; + } + goto zig_block_2; + + zig_block_2:; + t3 = t2; + t5.payload = t3; + t5.error = UINT16_C(0); + return t5; +} + +static uint64_t const builtin_zig_backend__247 = UINT64_C(3); + +static bool const start_simplified_logic__109 = false; + +static uint8_t const builtin_output_mode__248 = UINT8_C(1); + +static uint8_t const builtin_link_mode__249 = UINT8_C(0); + +static uint64_t features_features__314 = ((uint64_t)UINT64_C(0)); + +static uintptr_t const bit_set_IntegerBitSet_2864_29_bit_length__345 = 64ul; + +static bool const builtin_link_libc__259 = true; + +static bool const posix_use_libc__1269 = true; + +static struct Target_Os__625 const builtin_os__255 = {{ .linux = {{{6ul,12ul,42ul,{NULL,0xaaaaaaaaaaaaaaaaul},{NULL,0xaaaaaaaaaaaaaaaaul}},{6ul,12ul,42ul,{NULL,0xaaaaaaaaaaaaaaaaul},{NULL,0xaaaaaaaaaaaaaaaaul}}},{2ul,39ul,0ul,{NULL,0xaaaaaaaaaaaaaaaaul},{NULL,0xaaaaaaaaaaaaaaaaul}},UINT32_C(14)} },UINT8_C(9)}; + +static uint8_t const c_native_os__1711 = 
UINT8_C(9); + +static struct Target_DynamicLinker__801 const Target_DynamicLinker_none__3433 = {"\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252",UINT8_C(0)}; + +static bool const builtin_is_test__251 = false; + +static uint8_t *heap_CAllocator_alloc__3557(void *const a0, uintptr_t const a1, uint8_t const a2, uintptr_t const a3) { + uint64_t t0; + uint8_t *t2; + bool t1; + (void)a0; + (void)a3; + t0 = a1; + t1 = t0 > UINT64_C(0); + debug_assert__180(t1); + t2 = heap_CAllocator_alignedAlloc__3554(a1, a2); + return t2; +} + +static bool heap_CAllocator_resize__3558(void *const a0, nav__3558_40 const a1, uint8_t const a2, uintptr_t const a3, uintptr_t const a4) { + uintptr_t t0; + uint64_t t1; + uint64_t t2; + uint8_t *t4; + bool t3; + (void)a0; + (void)a2; + (void)a4; + t0 = a1.len; + t1 = a3; + t2 = t0; + t3 = t1 <= t2; + if (t3) { + return true; + } + goto zig_block_0; + + zig_block_0:; + t4 = a1.ptr; + t0 = heap_CAllocator_alignedAllocSize__3556(t4); + t2 = a3; + t1 = t0; + t3 = t2 <= t1; + if (t3) { + return 
true; + } + goto zig_block_1; + + zig_block_1:; + return false; +} + +static uint8_t *heap_CAllocator_remap__3559(void *const a0, nav__3559_40 const a1, uint8_t const a2, uintptr_t const a3, uintptr_t const a4) { + uint8_t *t0; + uint8_t *t2; + uint8_t *t3; + bool t1; + t1 = heap_CAllocator_resize__3558(a0, a1, a2, a3, a4); + if (t1) { + t2 = a1.ptr; + t3 = (uint8_t *)t2; + t0 = t3; + goto zig_block_0; + } + t0 = NULL; + goto zig_block_0; + + zig_block_0:; + return t0; +} + +static void heap_CAllocator_free__3560(void *const a0, nav__3560_40 const a1, uint8_t const a2, uintptr_t const a3) { + uint8_t *t0; + (void)a0; + (void)a2; + (void)a3; + t0 = a1.ptr; + heap_CAllocator_alignedFree__3555(t0); + return; +} + +static uint8_t *heap_CAllocator_alignedAlloc__3554(uintptr_t const a0, uint8_t const a1) { + uint8_t const *t1; + uintptr_t t3; + uint64_t t4; + void *t9; + void *t5; + uint8_t *t10; + int t6; + int32_t t7; + uint8_t t2; + uint8_t t0; + bool t8; + t0 = a1; + t1 = (uint8_t const *)&t0; + t2 = (*t1); + t3 = mem_Alignment_toByteUnits__1031(t2); + t3 = ((uintptr_t)8ul > t3) ? 
(uintptr_t)8ul : t3; + t4 = t3; + t3 = t4; + t6 = posix_memalign(&t5, t3, a0); + t7 = t6; + t8 = t7 != INT32_C(0); + if (t8) { + return NULL; + } + goto zig_block_0; + + zig_block_0:; + t9 = t5; + t10 = (uint8_t *)t9; + return t10; +} + +static uintptr_t heap_CAllocator_alignedAllocSize__3556(uint8_t *const a0) { + void const *t0; + void const *t1; + uintptr_t t2; + t0 = (void const *)a0; + t1 = t0; + t2 = malloc_usable_size(t1); + return t2; +} + +static void heap_CAllocator_alignedFree__3555(uint8_t *const a0) { + void *t0; + void *t1; + t0 = (void *)a0; + t1 = t0; + free(t1); + return; +} + +static uintptr_t mem_Alignment_toByteUnits__1031(uint8_t const a0) { + uintptr_t t1; + uint8_t t0; + t0 = a0; + t1 = zig_shlw_u64((uintptr_t)1ul, t0, UINT8_C(64)); + return t1; +} + +static struct mem_Allocator__565 const heap_c_allocator__3522 = {((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)&heap_CAllocator_vtable__3549)}; + +static struct mem_Allocator__565 const c_allocator__233 = {((void *)(uintptr_t)0xaaaaaaaaaaaaaaaaul),((struct mem_Allocator_VTable__568 const *)&heap_CAllocator_vtable__3549)}; + +static struct Target_Cpu_Feature_Set__817 const Target_Cpu_Feature_Set_empty__3478 = {{0ul,0ul,0ul,0ul,0ul}}; + +static struct Target_Cpu__786 const builtin_cpu__254 = {((struct Target_Cpu_Model__812 const *)&Target_x86_cpu_tigerlake__3664),{{520940646282633456ul,1395587583170564232ul,15691147860389201544ul,3ul,0ul}},UINT8_C(42)}; + +static uint8_t const builtin_abi__253 = UINT8_C(1); + +static uint8_t const builtin_object_format__257 = UINT8_C(0); + +static struct Target__623 const builtin_target__256 = {{((struct Target_Cpu_Model__812 const *)&Target_x86_cpu_tigerlake__3664),{{520940646282633456ul,1395587583170564232ul,15691147860389201544ul,3ul,0ul}},UINT8_C(42)},{{ .linux = 
{{{6ul,12ul,42ul,{NULL,0xaaaaaaaaaaaaaaaaul},{NULL,0xaaaaaaaaaaaaaaaaul}},{6ul,12ul,42ul,{NULL,0xaaaaaaaaaaaaaaaaul},{NULL,0xaaaaaaaaaaaaaaaaul}}},{2ul,39ul,0ul,{NULL,0xaaaaaaaaaaaaaaaaul},{NULL,0xaaaaaaaaaaaaaaaaul}},UINT32_C(14)} },UINT8_C(9)},UINT8_C(1),UINT8_C(0),{"/nix/store/zdpby3l6azi78sl83cpad2qjpfj25aqx-glibc-2.40-66/lib/ld-linux-x86-64.so.2\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252",UINT8_C(82)}}; + +static struct builtin_CallingConvention__266 const builtin_CallingConvention_c__455 = {{ .x86_64_sysv = {{UINT64_C(0xaaaaaaaaaaaaaaaa),true}} },UINT8_C(4)}; + +static uint8_t const (*const shared_RUNNER_CTL_FIFO__3685)[21] = &__anon_1890; + +static uint8_t const (*const shared_RUNNER_ACK_FIFO__3686)[21] = &__anon_1950; + +static uint8_t const mem_native_endian__755 = UINT8_C(1); + +static uint8_t const fs_path_native_os__3824 = UINT8_C(9); + +static uint8_t const fs_native_os__1081 = UINT8_C(9); + +static uint8_t const fs_Dir_native_os__3808 = UINT8_C(9); + +static uint8_t const os_linux_native_arch__2617 = UINT8_C(42); + +static int const cimport_EINTR__5842 = 4; + +static uint8_t const builtin_mode__258 = UINT8_C(3); + +static bool const debug_runtime_safety__159 = false; + +static bool const fs_Dir_have_flock__3809 = true; + +static bool const fs_File_is_windows__1257 = false; + +static uint8_t const 
posix_native_os__1267 = UINT8_C(9); + +static bool const posix_lfs64_abi__1697 = true; + +static uint8_t const c_native_abi__1709 = UINT8_C(1); + +static bool const posix_unexpected_error_tracing__1698 = false; + +static bool const mem_backend_supports_vectors__783 = true; + +static bool const builtin_valgrind_support__262 = false; + +static bool const debug_default_enable_segfault_handler__205 = false; + +static uint8_t const log_default_level__6368 = UINT8_C(0); + +static struct std_Options__4093 const std_options__97 = {{0xaaaaaaaaaaaaaaaaul,true},{0xaaaaaaaaaaaaaaaaul,true},3ul,false,UINT8_C(0),false,true,false,false,false,UINT8_C(2)}; + +static nav__3538_38 const heap_page_size_min_default__3538 = {4096ul,false}; + +static uintptr_t const heap_page_size_min__3517 = 4096ul; + +static uint16_t const fmt_max_format_args__6436 = UINT16_C(32); + +static uint8_t const (*const fmt_ANY__6439)[4] = &__anon_4843; + +static uint8_t const unicode_native_endian__6661 = UINT8_C(1); + +static uint32_t const unicode_replacement_character__6662 = UINT32_C(65533); + +static struct mem_Allocator_VTable__568 const heap_CAllocator_vtable__3549 = {&heap_CAllocator_alloc__3557,&heap_CAllocator_resize__3558,&heap_CAllocator_remap__3559,&heap_CAllocator_free__3560}; + +static bool const heap_CAllocator_supports_malloc_size__3550 = true; + +static bool const heap_CAllocator_supports_posix_memalign__3552 = true; + +static struct Target_Cpu_Model__812 const Target_x86_cpu_tigerlake__3664 = {{(uint8_t const *)&__anon_5093,9ul},{(uint8_t const *)&__anon_5093,9ul},{{221451271053508784ul,1377573176070099080ul,10502957384071186056ul,3ul,0ul}}}; + +#endif +#ifndef WRAPPER_H +#define WRAPPER_H + +#include + +#ifndef _WIN32 +#include "callgrind.h" +#include "valgrind.h" + +uint8_t running_on_valgrind() { return RUNNING_ON_VALGRIND > 0; } + +void callgrind_dump_stats() { CALLGRIND_DUMP_STATS; } + +void callgrind_dump_stats_at(uint8_t const* pos_str) { + CALLGRIND_DUMP_STATS_AT(pos_str); +} + 
+void callgrind_zero_stats() { CALLGRIND_ZERO_STATS; } + +void callgrind_start_instrumentation() { CALLGRIND_START_INSTRUMENTATION; } + +void callgrind_stop_instrumentation() { CALLGRIND_STOP_INSTRUMENTATION; } + +#else +// Windows and other platforms - provide no-op implementations +uint8_t running_on_valgrind() { return 0; } + +void callgrind_dump_stats() {} + +void callgrind_dump_stats_at(uint8_t const* pos_str) {} + +void callgrind_zero_stats() {} + +void callgrind_start_instrumentation() {} + +void callgrind_stop_instrumentation() {} +#endif + +#endif diff --git a/testing/capi/instrument-hooks/includes/callgrind.h b/testing/capi/instrument-hooks/includes/callgrind.h new file mode 100644 index 00000000..589b020f --- /dev/null +++ b/testing/capi/instrument-hooks/includes/callgrind.h @@ -0,0 +1,124 @@ +/* + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (callgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of callgrind, a valgrind tool for cache simulation + and call tree tracing. + + Copyright (C) 2003-2017 Josef Weidendorfer. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. 
Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (callgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + +#ifndef __CALLGRIND_H +#define __CALLGRIND_H + +#include "valgrind.h" + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. + + The identification ('C','T') for Callgrind has historical + reasons: it was called "Calltree" before. Besides, ('C','G') would + clash with cachegrind. 
+ */ + +typedef enum { + VG_USERREQ__DUMP_STATS = VG_USERREQ_TOOL_BASE('C', 'T'), + VG_USERREQ__ZERO_STATS, + VG_USERREQ__TOGGLE_COLLECT, + VG_USERREQ__DUMP_STATS_AT, + VG_USERREQ__START_INSTRUMENTATION, + VG_USERREQ__STOP_INSTRUMENTATION +} Vg_CallgrindClientRequest; + +/* Dump current state of cost centers, and zero them afterwards */ +#define CALLGRIND_DUMP_STATS \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS, 0, 0, 0, 0, 0) + +/* Dump current state of cost centers, and zero them afterwards. + The argument is appended to a string stating the reason which triggered + the dump. This string is written as a description field into the + profile data dump. */ +#define CALLGRIND_DUMP_STATS_AT(pos_str) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS_AT, pos_str, 0, 0, 0, \ + 0) + +/* Zero cost centers */ +#define CALLGRIND_ZERO_STATS \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ZERO_STATS, 0, 0, 0, 0, 0) + +/* Toggles collection state. + The collection state specifies whether the happening of events + should be noted or if they are to be ignored. Events are noted + by increment of counters in a cost center */ +#define CALLGRIND_TOGGLE_COLLECT \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__TOGGLE_COLLECT, 0, 0, 0, 0, 0) + +/* Start full callgrind instrumentation if not already switched on. + When cache simulation is done, it will flush the simulated cache; + this will lead to an artificial cache warmup phase afterwards with + cache misses which would not have happened in reality. */ +#define CALLGRIND_START_INSTRUMENTATION \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__START_INSTRUMENTATION, 0, 0, 0, \ + 0, 0) + +/* Stop full callgrind instrumentation if not already switched off. + This flushes Valgrinds translation cache, and does no additional + instrumentation afterwards, which effectivly will run at the same + speed as the "none" tool (ie. at minimal slowdown). + Use this to bypass Callgrind aggregation for uninteresting code parts. 
+ To start Callgrind in this mode to ignore the setup phase, use + the option "--instr-atstart=no". */ +#define CALLGRIND_STOP_INSTRUMENTATION \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STOP_INSTRUMENTATION, 0, 0, 0, \ + 0, 0) + +#endif /* __CALLGRIND_H */ diff --git a/testing/capi/instrument-hooks/includes/core.h b/testing/capi/instrument-hooks/includes/core.h new file mode 100644 index 00000000..8f78ac14 --- /dev/null +++ b/testing/capi/instrument-hooks/includes/core.h @@ -0,0 +1,77 @@ +// This file was manually created and exposes the functions of this library. +// TODO: Can we automatically generate this file? + +#ifndef CORE_H +#define CORE_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _WIN32 +#include "callgrind.h" +#include "valgrind.h" +#else +#define CALLGRIND_START_INSTRUMENTATION +#define CALLGRIND_STOP_INSTRUMENTATION +#define CALLGRIND_ZERO_STATS +#endif + +typedef uint64_t *InstrumentHooks; + +InstrumentHooks *instrument_hooks_init(void); +void instrument_hooks_deinit(InstrumentHooks *); + +bool instrument_hooks_is_instrumented(InstrumentHooks *); +int8_t instrument_hooks_start_benchmark(InstrumentHooks *); +int8_t instrument_hooks_stop_benchmark(InstrumentHooks *); +int8_t instrument_hooks_set_executed_benchmark(InstrumentHooks *, int32_t pid, + const char *uri); +// Deprecated: use instrument_hooks_set_executed_benchmark instead +int8_t instrument_hooks_executed_benchmark(InstrumentHooks *, int32_t pid, + const char *uri); +int8_t instrument_hooks_set_integration(InstrumentHooks *, const char *name, + const char *version); + +int8_t callgrind_start_instrumentation(); +int8_t callgrind_stop_instrumentation(); + +// Feature flags for instrument hooks + +typedef enum { + FEATURE_DISABLE_CALLGRIND_MARKERS = 0, +} instrument_hooks_feature_t; + +void instrument_hooks_set_feature(instrument_hooks_feature_t feature, + bool enabled); + +// Header functions that will be inlined. 
This can be used by languages that +// directly consume the headers such as C or C++. This will allow for more +// precise tracking of the benchmark performance. + +static inline int8_t instrument_hooks_start_benchmark_inline( + InstrumentHooks *instance) { + instrument_hooks_set_feature(FEATURE_DISABLE_CALLGRIND_MARKERS, true); + if (instrument_hooks_start_benchmark(instance) != 0) { + return 1; + } + + CALLGRIND_ZERO_STATS; + CALLGRIND_START_INSTRUMENTATION; + return 0; +} + +static inline int8_t instrument_hooks_stop_benchmark_inline( + InstrumentHooks *instance) { + CALLGRIND_STOP_INSTRUMENTATION; + return instrument_hooks_stop_benchmark(instance); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/testing/capi/instrument-hooks/includes/valgrind.h b/testing/capi/instrument-hooks/includes/valgrind.h new file mode 100644 index 00000000..7455909c --- /dev/null +++ b/testing/capi/instrument-hooks/includes/valgrind.h @@ -0,0 +1,7168 @@ +/* -*- c -*- + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (valgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2000-2017 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. 
If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (valgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +/* This file is for inclusion into client (your!) code. + + You can use these macros to manipulate and query Valgrind's + execution inside your own programs. + + The resulting executables will still run without Valgrind, just a + little bit more slowly than they otherwise would, but otherwise + unchanged. When not running on valgrind, each client request + consumes very few (eg. 
7) instructions, so the resulting performance + loss is negligible unless you plan to execute client requests + millions of times per second. Nevertheless, if that is still a + problem, you can compile with the NVALGRIND symbol defined (gcc + -DNVALGRIND) so that client requests are not even compiled in. */ + +#ifndef __VALGRIND_H +#define __VALGRIND_H + + +/* ------------------------------------------------------------------ */ +/* VERSION NUMBER OF VALGRIND */ +/* ------------------------------------------------------------------ */ + +/* Specify Valgrind's version number, so that user code can + conditionally compile based on our version number. Note that these + were introduced at version 3.6 and so do not exist in version 3.5 + or earlier. The recommended way to use them to check for "version + X.Y or later" is (eg) + +#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ + && (__VALGRIND_MAJOR__ > 3 \ + || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) +*/ +#define __VALGRIND_MAJOR__ 3 +#define __VALGRIND_MINOR__ 24 + + +#include + +/* Nb: this file might be included in a file compiled with -ansi. So + we can't use C++ style "//" comments nor the "asm" keyword (instead + use "__asm__"). */ + +/* Derive some tags indicating what the target platform is. Note + that in this file we're using the compiler's CPP symbols for + identifying architectures, which are different to the ones we use + within the rest of Valgrind. Note, __powerpc__ is active for both + 32 and 64-bit PPC, whereas __powerpc64__ is only active for the + latter (on Linux, that is). 
+ + Misc note: how to find out what's predefined in gcc by default: + gcc -Wp,-dM somefile.c +*/ +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_freebsd +#undef PLAT_amd64_freebsd +#undef PLAT_arm64_freebsd +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_arm64_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_nanomips_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + + +#if defined(__APPLE__) && defined(__i386__) +# define PLAT_x86_darwin 1 +#elif defined(__APPLE__) && defined(__x86_64__) +# define PLAT_amd64_darwin 1 +#elif defined(__FreeBSD__) && defined(__i386__) +# define PLAT_x86_freebsd 1 +#elif defined(__FreeBSD__) && defined(__amd64__) +# define PLAT_amd64_freebsd 1 +#elif defined(__FreeBSD__) && defined(__aarch64__) && !defined(__arm__) +# define PLAT_arm64_freebsd 1 +#elif (defined(__MINGW32__) && defined(__i386__)) \ + || defined(__CYGWIN32__) \ + || (defined(_WIN32) && defined(_M_IX86)) +# define PLAT_x86_win32 1 +#elif (defined(__MINGW32__) && defined(__x86_64__)) \ + || (defined(_WIN32) && defined(_M_X64)) +/* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. 
*/ +# define PLAT_amd64_win64 1 +#elif defined(__linux__) && defined(__i386__) +# define PLAT_x86_linux 1 +#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) +# define PLAT_amd64_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) +# define PLAT_ppc32_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 +/* Big Endian uses ELF version 1 */ +# define PLAT_ppc64be_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 +/* Little Endian uses ELF version 2 */ +# define PLAT_ppc64le_linux 1 +#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) +# define PLAT_arm_linux 1 +#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) +# define PLAT_arm64_linux 1 +#elif defined(__linux__) && defined(__s390__) && defined(__s390x__) +# define PLAT_s390x_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==64) +# define PLAT_mips64_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==32) +# define PLAT_mips32_linux 1 +#elif defined(__linux__) && defined(__nanomips__) +# define PLAT_nanomips_linux 1 +#elif defined(__sun) && defined(__i386__) +# define PLAT_x86_solaris 1 +#elif defined(__sun) && defined(__x86_64__) +# define PLAT_amd64_solaris 1 +#else +/* If we're not compiling for our target platform, don't generate + any inline asms. */ +# if !defined(NVALGRIND) +# define NVALGRIND 1 +# endif +#endif + + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ +/* in here of use to end-users -- skip to the next section. */ +/* ------------------------------------------------------------------ */ + +/* + * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client + * request. Accepts both pointers and integers as arguments. 
+ * + * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind + * client request that does not return a value. + + * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind + * client request and whose value equals the client request result. Accepts + * both pointers and integers as arguments. Note that such calls are not + * necessarily pure functions -- they may have side effects. + */ + +#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ + _zzq_request, _zzq_arg1, _zzq_arg2, \ + _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ + _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#if defined(NVALGRIND) + +/* Define NVALGRIND to completely remove the Valgrind magic sequence + from the compiled code (analogous to NDEBUG's effects on + assert()) */ +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + (_zzq_default) + +#else /* ! NVALGRIND */ + +/* The following defines the magic code sequences which the JITter + spots and handles magically. Don't look too closely at them as + they will rot your brain. + + The assembly code sequences for all architectures is in this one + file. This is because this file must be stand-alone, and we don't + want to have multiple files. + + For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default + value gets put in the return slot, so that everything works when + this is executed not under Valgrind. Args are passed in a memory + block, and so there's no intrinsic limit to the number that could + be passed, but it's currently five. 
+ + The macro args are: + _zzq_rlval result lvalue + _zzq_default default value (result returned when running on real CPU) + _zzq_request request code + _zzq_arg1..5 request params + + The other two macros are used to support function wrapping, and are + a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the + guest's NRADDR pseudo-register and whatever other information is + needed to safely run the call original from the wrapper: on + ppc64-linux, the R2 value at the divert point is also needed. This + information is abstracted into a user-visible type, OrigFn. + + VALGRIND_CALL_NOREDIR_* behaves the same as the following on the + guest, but guarantees that the branch instruction will not be + redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: + branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a + complete inline asm, since it needs to be combined with more magic + inline asm stuff to be useful. +*/ + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ + || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "roll $3, %%edi ; roll $13, %%edi\n\t" \ + "roll $29, %%edi ; roll $19, %%edi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EDX = client_request ( %EAX ) */ \ + "xchgl %%ebx,%%ebx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + "xchgl %%ecx,%%ecx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%EAX */ \ + "xchgl %%edx,%%edx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgl %%edi,%%edi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) + || PLAT_x86_solaris */ + +/* ------------------------- x86-Win32 ------------------------- */ + +#if defined(PLAT_x86_win32) && !defined(__GNUC__) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#if defined(_MSC_VER) + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm rol edi, 3 __asm rol edi, 13 \ + __asm rol edi, 29 __asm rol edi, 19 + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ + (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ + (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ + (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) + +static __inline uintptr_t +valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, + uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, + uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, + uintptr_t _zzq_arg5) +{ + volatile uintptr_t _zzq_args[6]; + volatile unsigned int _zzq_result; + _zzq_args[0] = (uintptr_t)(_zzq_request); + _zzq_args[1] = (uintptr_t)(_zzq_arg1); + _zzq_args[2] = (uintptr_t)(_zzq_arg2); + _zzq_args[3] = (uintptr_t)(_zzq_arg3); + _zzq_args[4] = (uintptr_t)(_zzq_arg4); + _zzq_args[5] = (uintptr_t)(_zzq_arg5); + __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default + __SPECIAL_INSTRUCTION_PREAMBLE + /* %EDX = client_request ( %EAX ) */ + __asm xchg ebx,ebx + __asm mov _zzq_result, edx + } + return _zzq_result; +} + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + __asm xchg ecx,ecx \ + __asm mov __addr, eax \ + } \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX ERROR + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm xchg edi,edi \ + } \ + } while (0) + +#else +#error Unsupported compiler. 
+#endif + +#endif /* PLAT_x86_win32 */ + +/* ----------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) \ + || defined(PLAT_amd64_freebsd) \ + || (defined(PLAT_amd64_win64) && defined(__GNUC__)) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ + "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RAX = guest_NRADDR */ \ + "xchgq %%rcx,%%rcx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_RAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%RAX */ \ + "xchgq %%rdx,%%rdx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgq %%rdi,%%rdi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin 
|| PLAT_amd64_solaris */ + +/* ------------------------- amd64-Win64 ------------------------- */ + +#if defined(PLAT_amd64_win64) && !defined(__GNUC__) + +#error Unsupported compiler. + +#endif /* PLAT_amd64_win64 */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ + "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned int _zzq_args[6]; \ + unsigned int _zzq_result; \ + unsigned int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + 
"or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + unsigned long int r2; /* what tocptr do we need? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned long int _zzq_args[6]; \ + unsigned long int _zzq_result; \ + unsigned long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* 
branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +#if defined(PLAT_ppc64le_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + unsigned long int r2; /* what tocptr do we need? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned long int _zzq_args[6]; \ + unsigned long int _zzq_result; \ + unsigned long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define 
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R12 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ + "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("mov r3, %1\n\t" /*default*/ \ + "mov r4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = client_request ( R4 ) */ \ + "orr r10, r10, r10\n\t" \ + "mov %0, r3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "cc","memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = guest_NRADDR */ \ + "orr r11, r11, r11\n\t" \ + "mov %0, r3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R4 */ \ + "orr r12, r12, 
r12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr r9, r9, r9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-{linux,freebsd} ------------------------- */ + +#if defined(PLAT_arm64_linux) || defined(PLAT_arm64_freebsd) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ + "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile("mov x3, %1\n\t" /*default*/ \ + "mov x4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = client_request ( X4 ) */ \ + "orr x10, x10, x10\n\t" \ + "mov %0, x3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" ((unsigned long int)(_zzq_default)), \ + "r" (&_zzq_args[0]) \ + : "cc","memory", "x3", "x4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = guest_NRADDR */ \ + "orr x11, x11, x11\n\t" \ + "mov %0, x3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "x3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir X8 */ \ + "orr x12, x12, 
x12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr x9, x9, x9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm64_linux || PLAT_arm64_freebsd */ + +/* ------------------------ s390x-linux ------------------------ */ + +#if defined(PLAT_s390x_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + } + OrigFn; + +/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific + * code. This detection is implemented in platform specific toIR.c + * (e.g. VEX/priv/guest_s390_decoder.c). + */ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "lr 15,15\n\t" \ + "lr 1,1\n\t" \ + "lr 2,2\n\t" \ + "lr 3,3\n\t" + +#define __CLIENT_REQUEST_CODE "lr 2,2\n\t" +#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" +#define __CALL_NO_REDIR_CODE "lr 4,4\n\t" +#define __VEX_INJECT_IR_CODE "lr 5,5\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile(/* r2 = args */ \ + "lgr 2,%1\n\t" \ + /* r3 = default */ \ + "lgr 3,%2\n\t" \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CLIENT_REQUEST_CODE \ + /* results = r3 */ \ + "lgr %0, 3\n\t" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), \ + "0" ((unsigned long int)_zzq_default) \ + : "cc", "2", "3", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ 
volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __GET_NR_CONTEXT_CODE \ + "lgr %0, 3\n\t" \ + : "=a" (__addr) \ + : \ + : "cc", "3", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_R1 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CALL_NO_REDIR_CODE + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __VEX_INJECT_IR_CODE); \ + } while (0) + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ---------------- */ + +#if defined(PLAT_mips32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? */ + } + OrigFn; + +/* .word 0x342 + * .word 0x742 + * .word 0xC2 + * .word 0x4C2*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "srl $0, $0, 13\n\t" \ + "srl $0, $0, 29\n\t" \ + "srl $0, $0, 3\n\t" \ + "srl $0, $0, 19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* T3 = client_request ( T4 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %t9 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : 
"$11" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%t9 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- mips64-linux ---------------- */ + +#if defined(PLAT_mips64_linux) + +typedef + struct { + unsigned long nraddr; /* where's the code? */ + } + OrigFn; + +/* dsll $0,$0, 3 + * dsll $0,$0, 13 + * dsll $0,$0, 29 + * dsll $0,$0, 19*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ + "dsll $0,$0,29 ; dsll $0,$0,19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = client_request ( $12 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11"); \ + _zzq_orig->nraddr = __addr; \ + } + 
+#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +#if defined(PLAT_nanomips_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? */ + } + OrigFn; +/* + 8000 c04d srl zero, zero, 13 + 8000 c05d srl zero, zero, 29 + 8000 c043 srl zero, zero, 3 + 8000 c053 srl zero, zero, 19 +*/ + +#define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \ + "srl[32] $zero, $zero, 29 \n\t" \ + "srl[32] $zero, $zero, 3 \n\t" \ + "srl[32] $zero, $zero, 19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $a7, %1\n\t" /* default */ \ + "move $t0, %2\n\t" /* ptr */ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $a7 = client_request( $t0 ) */ \ + "or[32] $t0, $t0, $t0\n\t" \ + "move %0, $a7\n\t" /* result */ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$a7", "$t0", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $a7 = guest_NRADDR */ \ + "or[32] $t1, $t1, $t1\n\t" \ + "move %0, $a7" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$a7"); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + 
__SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or[32] $t2, $t2, $t2\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or[32] $t3, $t3, $t3\n\t" \ + ); \ + } while (0) + +#endif +/* Insert assembly code for other platforms here... */ + +#endif /* NVALGRIND */ + + +/* ------------------------------------------------------------------ */ +/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ +/* ugly. It's the least-worst tradeoff I can think of. */ +/* ------------------------------------------------------------------ */ + +/* This section defines magic (a.k.a appalling-hack) macros for doing + guaranteed-no-redirection macros, so as to get from function + wrappers to the functions they are wrapping. The whole point is to + construct standard call sequences, but to do the call itself with a + special no-redirect call pseudo-instruction that the JIT + understands and handles specially. This section is long and + repetitious, and I can't see a way to make it shorter. + + The naming scheme is as follows: + + CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} + + 'W' stands for "word" and 'v' for "void". Hence there are + different macros for calling arity 0, 1, 2, 3, 4, etc, functions, + and for each, the possibility of returning a word-typed result, or + no result. +*/ + +/* Use these to write the name of your wrapper. NOTE: duplicates + VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts + the default behaviour equivalance class tag "0000" into the name. + See pub_tool_redir.h for details -- normally you don't need to + think about this, though. */ + +/* Use an extra level of macroisation so as to ensure the soname/fnname + args are fully macro-expanded before pasting them together. 
*/ +#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd + +#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) + +#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) + +/* Use this macro from within a wrapper function to collect the + context (address and possibly other info) of the original function. + Once you have that you can then use it in one of the CALL_FN_ + macros. The type of the argument _lval is OrigFn. */ +#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) + +/* Also provide end-user facilities for function replacement, rather + than wrapping. A replacement function differs from a wrapper in + that it has no way to get hold of the original function being + called, and hence no way to call onwards to it. In a replacement + function, VALGRIND_GET_ORIG_FN always returns zero. */ + +#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) + +#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) + +/* Derivatives of the main macros below, for calling functions + returning void. 
*/ + +#define CALL_FN_v_v(fnptr) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_v(_junk,fnptr); } while (0) + +#define CALL_FN_v_W(fnptr, arg1) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_W(_junk,fnptr,arg1); } while (0) + +#define CALL_FN_v_WW(fnptr, arg1,arg2) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) + +#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) + +#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) + +#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) + +#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) + +#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd) + +/* These regs are trashed by the hidden call. No need to mention eax + as gcc can already see that, plus causes gcc to bomb. */ +#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. 
*/ + +#define VALGRIND_ALIGN_STACK \ + "movl %%esp,%%edi\n\t" \ + "andl $0xfffffff0,%%esp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movl %%edi,%%esp\n\t" + +/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned 
long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + 
_argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); 
\ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + 
VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = 
(unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 48(%%eax)\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ + +/* ---------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) || defined(PLAT_amd64_freebsd) + +/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ + "rdi", "r8", "r9", "r10", "r11" + +/* This is all pretty complex. It's so as to make stack unwinding + work reliably. See bug 243270. The basic problem is the sub and + add of 128 of %rsp in all of the following macros. If gcc believes + the CFA is in %rsp, then unwinding may fail, because what's at the + CFA is not what gcc "expected" when it constructs the CFIs for the + places where the macros are instantiated. 
+ + But we can't just add a CFI annotation to increase the CFA offset + by 128, to match the sub of 128 from %rsp, because we don't know + whether gcc has chosen %rsp as the CFA at that point, or whether it + has chosen some other register (eg, %rbp). In the latter case, + adding a CFI annotation to change the CFA offset is simply wrong. + + So the solution is to get hold of the CFA using + __builtin_dwarf_cfa(), put it in a known register, and add a + CFI annotation to say what the register is. We choose %rbp for + this (perhaps perversely), because: + + (1) %rbp is already subject to unwinding. If a new register was + chosen then the unwinder would have to unwind it in all stack + traces, which is expensive, and + + (2) %rbp is already subject to precise exception updates in the + JIT. If a new register was chosen, we'd have to have precise + exceptions for it too, which reduces performance of the + generated code. + + However .. one extra complication. We can't just whack the result + of __builtin_dwarf_cfa() into %rbp and then add %rbp to the + list of trashed registers at the end of the inline assembly + fragments; gcc won't allow %rbp to appear in that list. Hence + instead we need to stash %rbp in %r15 for the duration of the asm, + and say that %r15 is trashed instead. gcc seems happy to go with + that. + + Oh .. and this all needs to be conditionalised so that it is + unchanged from before this commit, when compiled with older gccs + that don't support __builtin_dwarf_cfa. Furthermore, since + this header file is freestanding, it has to be independent of + config.h, and so the following conditionalisation cannot depend on + configure time checks. + + Although it's not clear from + 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', + this expression excludes Darwin. + .cfi directives in Darwin assembly appear to be completely + different and I haven't investigated how they work. 
+ + For even more entertainment value, note we have to use the + completely undocumented __builtin_dwarf_cfa(), which appears to + really compute the CFA, whereas __builtin_frame_address(0) claims + to but actually doesn't. See + https://bugs.kde.org/show_bug.cgi?id=243270#c47 +*/ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"r"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + "movq %%rbp, %%r15\n\t" \ + "movq %2, %%rbp\n\t" \ + ".cfi_remember_state\n\t" \ + ".cfi_def_cfa rbp, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "movq %%r15, %%rbp\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "movq %%rsp,%%r14\n\t" \ + "andq $0xfffffffffffffff0,%%rsp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movq %%r14,%%rsp\n\t" + +/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned + long) == 8. */ + +/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ + macros. In order not to trash the stack redzone, we need to drop + %rsp by 128 before the hidden call, and restore afterwards. The + nastyness is that it is only by luck that the stack still appears + to be unwindable during the hidden call - since then the behaviour + of any routine using this macro does not match what the CFI data + says. Sigh. + + Why is this important? Imagine that a wrapper has a stack + allocated local, and passes to the hidden call, a pointer to it. + Because gcc does not know about the hidden call, it may allocate + that local in the redzone. Unfortunately the hidden call may then + trash it before it comes to use it. 
So we must step clear of the + redzone, for the duration of the hidden call, to make it safe. + + Probably the same problem afflicts the other redzone-style ABIs too + (ppc64-linux); but for those, the stack is + self describing (none of this CFI nonsense) so at least messing + with the stack pointer doesn't give a danger of non-unwindable + stack. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ 
+ VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + 
VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 
16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + 
_argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = 
(unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 96(%%rax)\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), 
%%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +/* This is useful for finding out about the on-stack stuff: + + extern int f9 ( int,int,int,int,int,int,int,int,int ); + extern int f10 ( int,int,int,int,int,int,int,int,int,int ); + extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); + extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); + + int g9 ( void ) { + return f9(11,22,33,44,55,66,77,88,99); + } + int g10 ( void ) { + return f10(11,22,33,44,55,66,77,88,99,110); + } + int g11 ( void ) { + return f11(11,22,33,44,55,66,77,88,99,110,121); + } + int g12 ( void ) { + return f12(11,22,33,44,55,66,77,88,99,110,121,132); + } +*/ + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. 
*/ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rlwinm 1,1,0,0,27\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc32-linux, + sizeof(unsigned long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : 
/*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned 
long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + 
_argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + 
arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 
3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" 
(&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg12 */ \ + "lwz 3,48(11)\n\t" \ + "stw 3,20(1)\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden 
call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rldicr 1,1,0,59\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 
11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* 
arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = 
(unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ 
"r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned 
long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 
3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + 
"ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + 
"ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(11)\n\t" \ + "std 3,136(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 
11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +/* ------------------------- ppc64le-linux ----------------------- */ +#if defined(PLAT_ppc64le_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rldicr 1,1,0,59\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + 
_argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds 
current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + 
VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = 
(unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" 
/* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = 
(orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(12)\n\t" \ + "std 3,112(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds 
current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(12)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(12)\n\t" \ + "std 3,112(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +/* These regs are trashed by the hidden call. 
*/ +#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +/* This is a bit tricky. We store the original stack pointer in r10 + as it is callee-saves. gcc doesn't allow the use of r11 for some + reason. Also, we can't directly "bic" the stack pointer in thumb + mode since r13 isn't an allowed register number in that context. + So use r4 as a temporary, since that is about to get trashed + anyway, just after each use of this macro. Side effect is we need + to be very careful about any future changes, since + VALGRIND_ALIGN_STACK simply assumes r4 is usable. */ +#define VALGRIND_ALIGN_STACK \ + "mov r10, sp\n\t" \ + "mov r4, sp\n\t" \ + "bic r4, r4, #7\n\t" \ + "mov sp, r4\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, r10\n\t" + +/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long 
_argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + 
"ldr r0, [%1, #20] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); 
\ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "push {r0, r1, r2, r3} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + 
_argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, 
r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + 
arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "ldr r2, [%1, #48] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------ */ + +#if defined(PLAT_arm64_linux) || defined(PLAT_arm64_freebsd) + +/* These regs are trashed by the hidden call. 
*/ +#define __CALLER_SAVED_REGS \ + "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ + "x18", "x19", "x20", "x30", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ + "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ + "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ + "v26", "v27", "v28", "v29", "v30", "v31" + +/* x21 is callee-saved, so we can use it to save and restore SP around + the hidden call. */ +#define VALGRIND_ALIGN_STACK \ + "mov x21, sp\n\t" \ + "bic sp, x21, #15\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, x21\n\t" + +/* These CALL_FN_ macros assume that on arm64-linux, + sizeof(unsigned long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); 
\ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + 
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + 
: /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" 
\ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned 
long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, 
[%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11, \ + arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1, #96] \n\t" \ + "str x8, [sp, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" 
(_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------- s390x-linux ------------------------- */ + +#if defined(PLAT_s390x_linux) + +/* Similar workaround as amd64 (see above), but we use r11 as frame + pointer and save the old r11 in r7. r11 might be used for + argvec, therefore we copy argvec in r1 since r1 is clobbered + after the call anyway. */ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"d"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + ".cfi_remember_state\n\t" \ + "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ + "lgr 7,11\n\t" \ + "lgr 11,%2\n\t" \ + ".cfi_def_cfa 11, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "lgr 11, 7\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE \ + "lgr 1,%1\n\t" +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Nb: On s390 the stack pointer is properly aligned *at all times* + according to the s390 GCC maintainer. (The ABI specification is not + precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and + VALGRIND_RESTORE_STACK are not defined here. */ + +/* These regs are trashed by the hidden call. Note that we overwrite + r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the + function a proper return address. All others are ABI defined call + clobbers. 
*/ +#if defined(__VX__) || defined(__S390_VX__) +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +#else +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" +#endif + +/* Nb: Although r11 is modified in the asm snippets below (inside + VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for + two reasons: + (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not + modified + (2) GCC will complain that r11 cannot appear inside a clobber section, + when compiled with -O -fno-omit-frame-pointer + */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 1, 0(1)\n\t" /* target->r1 */ \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +/* The call abi has the arguments in r2-r6 and stack */ +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) 
__FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1, arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ 
volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-168\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ 
+ "mvc 160(8,15), 48(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,168\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-176\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,176\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + 
VALGRIND_CFI_PROLOGUE \ + "aghi 15,-184\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,184\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-192\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,192\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = 
(unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-200\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,200\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-208\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 
176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,208\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-216\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "mvc 208(8,15), 96(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,216\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ----------------------- */ + +#if 
defined(PLAT_mips32_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16\n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" /* arg1*/ \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = 
(unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu 
$29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 24\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 24 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned 
long)(arg6); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "nop\n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define 
CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "subu $29, 
$29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) 
\n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 56\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 48(%1) \n\t" \ + "sw $4, 44($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 56 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- nanomips-linux -------------------- */ + +#if defined(PLAT_nanomips_linux) + +/* These regs are trashed by the hidden call. 
*/ +#define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2", \ +"$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", \ +"$t8","$t9", "$at" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + 
_argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile 
unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + "lw $a7,32(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", 
__CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + 
_argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9,48(%1) \n\t" \ + "sw $t9,12($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" 
\ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_nanomips_linux */ + +/* ------------------------- mips64-linux ------------------------- */ + +#if defined(PLAT_mips64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips64-linux, + sizeof(long long) == 8. */ + +#define MIPS64_LONG2REG_CAST(x) ((long long)(long)x) + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[1]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + __asm__ volatile( \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[2]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" /* arg1*/ \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[3]; \ + volatile unsigned long long _res; \ + _argvec[0] = 
_orig.nraddr; \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[4]; \ + volatile unsigned long long _res; \ + _argvec[0] = _orig.nraddr; \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[5]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + 
volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[6]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[7]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[8]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + 
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[9]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + 
volatile unsigned long long _argvec[10]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + __asm__ volatile( \ + "dsubu $29, $29, 8\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 8\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[11]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + __asm__ volatile( \ + "dsubu $29, $29, 16\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 
8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 16\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[12]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ + __asm__ volatile( \ + "dsubu $29, $29, 24\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 24\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + 
arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[13]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ + _argvec[12] = MIPS64_LONG2REG_CAST(arg12); \ + __asm__ volatile( \ + "dsubu $29, $29, 32\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 96(%1)\n\t" \ + "sd $4, 24($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 32\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ +/* */ +/* ------------------------------------------------------------------ */ + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. + + Core ones are in the range 0x00000000--0x0000ffff. 
The non-public + ones start at 0x2000. +*/ + +/* These macros are used by tools -- they must be public, but don't + embed them into other programs. */ +#define VG_USERREQ_TOOL_BASE(a,b) \ + ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) +#define VG_IS_TOOL_USERREQ(a, b, v) \ + (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end of the most + relevant group. */ +typedef + enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, + VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, + + /* These allow any function to be called from the simulated + CPU but run on the real CPU. Nb: the first arg passed to + the function is always the ThreadId of the running + thread! So CLIENT_CALL0 actually requires a 1 arg + function, etc. */ + VG_USERREQ__CLIENT_CALL0 = 0x1101, + VG_USERREQ__CLIENT_CALL1 = 0x1102, + VG_USERREQ__CLIENT_CALL2 = 0x1103, + VG_USERREQ__CLIENT_CALL3 = 0x1104, + + /* Can be useful in regression testing suites -- eg. can + send Valgrind's output to /dev/null and still count + errors. */ + VG_USERREQ__COUNT_ERRORS = 0x1201, + + /* Allows the client program and/or gdbserver to execute a monitor + command. */ + VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, + + /* Allows the client program to change a dynamic command line + option. */ + VG_USERREQ__CLO_CHANGE = 0x1203, + + /* These are useful and can be interpreted by any tool that + tracks malloc() et al, by using vg_replace_malloc.c. */ + VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, + VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, + VG_USERREQ__FREELIKE_BLOCK = 0x1302, + /* Memory pool support. 
*/ + VG_USERREQ__CREATE_MEMPOOL = 0x1303, + VG_USERREQ__DESTROY_MEMPOOL = 0x1304, + VG_USERREQ__MEMPOOL_ALLOC = 0x1305, + VG_USERREQ__MEMPOOL_FREE = 0x1306, + VG_USERREQ__MEMPOOL_TRIM = 0x1307, + VG_USERREQ__MOVE_MEMPOOL = 0x1308, + VG_USERREQ__MEMPOOL_CHANGE = 0x1309, + VG_USERREQ__MEMPOOL_EXISTS = 0x130a, + + /* Allow printfs to valgrind log. */ + /* The first two pass the va_list argument by value, which + assumes it is the same size as or smaller than a UWord, + which generally isn't the case. Hence are deprecated. + The second two pass the vargs by reference and so are + immune to this problem. */ + /* both :: char* fmt, va_list vargs (DEPRECATED) */ + VG_USERREQ__PRINTF = 0x1401, + VG_USERREQ__PRINTF_BACKTRACE = 0x1402, + /* both :: char* fmt, va_list* vargs */ + VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, + + /* Stack support. */ + VG_USERREQ__STACK_REGISTER = 0x1501, + VG_USERREQ__STACK_DEREGISTER = 0x1502, + VG_USERREQ__STACK_CHANGE = 0x1503, + + /* Wine support */ + VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, + + /* Querying of debug info. */ + VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, + + /* Disable/enable error reporting level. Takes a single + Word arg which is the delta to this thread's error + disablement indicator. Hence 1 disables or further + disables errors, and -1 moves back towards enablement. + Other values are not allowed. */ + VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, + + /* Some requests used for Valgrind internal, such as + self-test or self-hosting. */ + /* Initialise IR injection */ + VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901, + /* Used by Inner Valgrind to inform Outer Valgrind where to + find the list of inner guest threads */ + VG_USERREQ__INNER_THREADS = 0x1902 + } Vg_ClientRequest; + +#if !defined(__GNUC__) +# define __extension__ /* */ +#endif + + +/* Returns the number of Valgrinds this code is running under. 
That + is, 0 if running natively, 1 if running under Valgrind, 2 if + running under Valgrind which is running under another Valgrind, + etc. */ +#define RUNNING_ON_VALGRIND \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0, 0) \ + + +/* Discard translation of code in the range [_qzz_addr .. _qzz_addr + + _qzz_len - 1]. Useful if you are debugging a JITter or some such, + since it provides a way to make sure valgrind will retranslate the + invalidated area. Returns no value. */ +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ + _qzz_addr, _qzz_len, 0, 0, 0) + +#define VALGRIND_INNER_THREADS(_qzz_addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \ + _qzz_addr, 0, 0, 0, 0) + + +/* These requests are for getting Valgrind itself to print something. + Possibly with a backtrace. This is a really ugly hack. The return value + is the number of characters printed, excluding the "**** " part at the + start and the backtrace (if present). */ + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +/* Modern GCC will optimize the static routine out if unused, + and unused attribute will shut down warnings about it. */ +static int VALGRIND_PRINTF(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF(const char *format, ...) 
+{ +#if defined(NVALGRIND) + (void)format; + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + unsigned long _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF_BACKTRACE(const char *format, ...) +{ +#if defined(NVALGRIND) + (void)format; + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + unsigned long _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + + +/* These requests allow control to move from the simulated CPU to the + real CPU, calling an arbitrary function. + + Note that the current ThreadId is inserted as the first argument. + So this call: + + VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) + + requires f to have this signature: + + Word f(Word tid, Word arg1, Word arg2) + + where "Word" is a word-sized type. 
+ + Note that these client requests are not entirely reliable. For example, + if you call a function with them that subsequently calls printf(), + there's a high chance Valgrind will crash. Generally, your prospects of + these working are made higher if the called function does not refer to + any global variables, and does not refer to any libc or other functions + (printf et al). Any kind of entanglement with libc or dynamic linking is + likely to have a bad outcome, for tricky reasons which we've grappled + with a lot in the past. +*/ +#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL0, \ + _qyy_fn, \ + 0, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL1, \ + _qyy_fn, \ + _qyy_arg1, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL2, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, 0, 0) + +#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL3, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, \ + _qyy_arg3, 0) + + +/* Counts the number of errors that have been recorded by a tool. Nb: + the tool must record the errors with VG_(maybe_record_error)() or + VG_(unique_error)() for them to be counted. */ +#define VALGRIND_COUNT_ERRORS \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + 0 /* default return */, \ + VG_USERREQ__COUNT_ERRORS, \ + 0, 0, 0, 0, 0) + +/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing + when heap blocks are allocated in order to give accurate results. This + happens automatically for the standard allocator functions such as + malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, + delete[], etc. 
+ + But if your program uses a custom allocator, this doesn't automatically + happen, and Valgrind will not do as well. For example, if you allocate + superblocks with mmap() and then allocates chunks of the superblocks, all + Valgrind's observations will be at the mmap() level and it won't know that + the chunks should be considered separate entities. In Memcheck's case, + that means you probably won't get heap block overrun detection (because + there won't be redzones marked as unaddressable) and you definitely won't + get any leak detection. + + The following client requests allow a custom allocator to be annotated so + that it can be handled accurately by Valgrind. + + VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated + by a malloc()-like function. For Memcheck (an illustrative case), this + does two things: + + - It records that the block has been allocated. This means any addresses + within the block mentioned in error messages will be + identified as belonging to the block. It also means that if the block + isn't freed it will be detected by the leak checker. + + - It marks the block as being addressable and undefined (if 'is_zeroed' is + not set), or addressable and defined (if 'is_zeroed' is set). This + controls how accesses to the block by the program are handled. + + 'addr' is the start of the usable block (ie. after any + redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator + can apply redzones -- these are blocks of padding at the start and end of + each block. Adding redzones is recommended as it makes it much more likely + Valgrind will spot block overruns. `is_zeroed' indicates if the memory is + zeroed (or filled with another predictable value), as is the case for + calloc(). + + VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a + heap block -- that will be used by the client program -- is allocated. 
+ It's best to put it at the outermost level of the allocator if possible; + for example, if you have a function my_alloc() which calls + internal_alloc(), and the client request is put inside internal_alloc(), + stack traces relating to the heap block will contain entries for both + my_alloc() and internal_alloc(), which is probably not what you want. + + For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out + custom blocks from within a heap block, B, that has been allocated with + malloc/calloc/new/etc, then block B will be *ignored* during leak-checking + -- the custom blocks will take precedence. + + VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For + Memcheck, it does two things: + + - It records that the block has been deallocated. This assumes that the + block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - It marks the block as being unaddressable. + + VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a + heap block is deallocated. + + VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For + Memcheck, it does four things: + + - It records that the size of a block has been changed. This assumes that + the block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - If the block shrunk, it marks the freed memory as being unaddressable. + + - If the block grew, it marks the new area as undefined and defines a red + zone past the end of the new block. + + - The V-bits of the overlap between the old and the new block are preserved. + + VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block + and before deallocation of the old block. + + In many cases, these three client requests will not be enough to get your + allocator working well with Memcheck. 
More specifically, if your allocator + writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call + will be necessary to mark the memory as addressable just before the zeroing + occurs, otherwise you'll get a lot of invalid write errors. For example, + you'll need to do this if your allocator recycles freed blocks, but it + zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). + Alternatively, if your allocator reuses freed blocks for allocator-internal + data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. + + Really, what's happening is a blurring of the lines between the client + program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the + memory should be considered unaddressable to the client program, but the + allocator knows more than the rest of the client program and so may be able + to safely access it. Extra client requests are necessary for Valgrind to + understand the distinction between the allocator and the rest of the + program. + + Ignored if addr == 0. +*/ +#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ + addr, sizeB, rzB, is_zeroed, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ + addr, oldSizeB, newSizeB, rzB, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ + addr, rzB, 0, 0, 0) + +/* Create a memory pool. */ +#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, 0, 0) + +/* Create a memory pool with some flags specifying extended behaviour. 
+ When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL. + + The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory + associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used + by the application as superblocks to dole out MALLOC_LIKE blocks using + VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels" + pool : first level is the blocks described by VALGRIND_MEMPOOL_ALLOC. + The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK. + Note that the association between the pool and the second level blocks + is implicit : second level blocks will be located inside first level + blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag + for such 2 levels pools, as otherwise valgrind will detect overlapping + memory blocks, and will abort execution (e.g. during leak search). + + Such a meta pool can also be marked as an 'auto free' pool using the flag + VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the + VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE + will automatically free the second level blocks that are contained + inside the first level block freed with VALGRIND_MEMPOOL_FREE. + In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls + to VALGRIND_FREELIKE_BLOCK for all the second level blocks included + in the first level block. + Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag + without the VALGRIND_MEMPOOL_METAPOOL flag. +*/ +#define VALGRIND_MEMPOOL_AUTO_FREE 1 +#define VALGRIND_MEMPOOL_METAPOOL 2 +#define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, flags, 0) + +/* Destroy a memory pool. */ +#define VALGRIND_DESTROY_MEMPOOL(pool) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ + pool, 0, 0, 0, 0) + +/* Associate a piece of memory with a memory pool. 
*/ +#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ + pool, addr, size, 0, 0) + +/* Disassociate a piece of memory from a memory pool. */ +#define VALGRIND_MEMPOOL_FREE(pool, addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ + pool, addr, 0, 0, 0) + +/* Disassociate any pieces outside a particular range. */ +#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ + pool, addr, size, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ + poolA, poolB, 0, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ + pool, addrA, addrB, size, 0) + +/* Return 1 if a mempool exists, else 0. */ +#define VALGRIND_MEMPOOL_EXISTS(pool) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MEMPOOL_EXISTS, \ + pool, 0, 0, 0, 0) + +/* Mark a piece of memory as being a stack. Returns a stack id. + start is the lowest addressable stack byte, end is the highest + addressable stack byte. */ +#define VALGRIND_STACK_REGISTER(start, end) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__STACK_REGISTER, \ + start, end, 0, 0, 0) + +/* Unmark the piece of memory associated with a stack id as being a + stack. */ +#define VALGRIND_STACK_DEREGISTER(id) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ + id, 0, 0, 0, 0) + +/* Change the start and end address of the stack id. + start is the new lowest addressable stack byte, end is the new highest + addressable stack byte. 
*/ +#define VALGRIND_STACK_CHANGE(id, start, end) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ + id, start, end, 0, 0) + +/* Load PDB debug info for Wine PE image_map. */ +#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ + fd, ptr, total_size, delta, 0) + +/* Map a code address to a source file name and line number. buf64 + must point to a 64-byte buffer in the caller's address space. The + result will be dumped in there and is guaranteed to be zero + terminated. If no info is found, the first byte is set to zero. */ +#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MAP_IP_TO_SRCLOC, \ + addr, buf64, 0, 0, 0) + +/* Disable error reporting for this thread. Behaves in a stack like + way, so you can safely call this multiple times provided that + VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times + to re-enable reporting. The first call of this macro disables + reporting. Subsequent calls have no effect except to increase the + number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable + reporting. Child threads do not inherit this setting from their + parents -- they are always created with reporting enabled. */ +#define VALGRIND_DISABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + 1, 0, 0, 0, 0) + +/* Re-enable error reporting, as per comments on + VALGRIND_DISABLE_ERROR_REPORTING. */ +#define VALGRIND_ENABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + -1, 0, 0, 0, 0) + +/* Execute a monitor command from the client program. + If a connection is opened with GDB, the output will be sent + according to the output mode set for vgdb. + If no connection is opened, output will go to the log output. + Returns 1 if command not recognised, 0 otherwise. 
*/ +#define VALGRIND_MONITOR_COMMAND(command) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ + command, 0, 0, 0, 0) + + +/* Change the value of a dynamic command line option. + Note that unknown or not dynamically changeable options + will cause a warning message to be output. */ +#define VALGRIND_CLO_CHANGE(option) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \ + option, 0, 0, 0, 0) + + +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_nanomips_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + +#endif /* __VALGRIND_H */ diff --git a/testing/capi/instrument-hooks/includes/zig.h b/testing/capi/instrument-hooks/includes/zig.h new file mode 100644 index 00000000..2d9e7a56 --- /dev/null +++ b/testing/capi/instrument-hooks/includes/zig.h @@ -0,0 +1,4209 @@ +#undef linux + +#include +#include + +#if defined(_MSC_VER) +#define zig_msvc +#elif defined(__clang__) +#define zig_clang +#define zig_gnuc +#elif defined(__GNUC__) +#define zig_gcc +#define zig_gnuc +#elif defined(__IBMC__) +#define zig_xlc +#elif defined(__TINYC__) +#define zig_tinyc +#elif defined(__slimcc__) +#define zig_slimcc +#endif + +#if defined(__aarch64__) || (defined(zig_msvc) && defined(_M_ARM64)) +#define zig_aarch64 +#elif defined(__thumb__) || (defined(zig_msvc) && defined(_M_ARM)) +#define zig_thumb +#define zig_arm +#elif defined(__arm__) +#define zig_arm +#elif defined(__hexagon__) +#define zig_hexagon +#elif defined(__loongarch32) +#define zig_loongarch32 +#define zig_loongarch +#elif defined(__loongarch64) +#define zig_loongarch64 +#define zig_loongarch +#elif defined(__mips64) +#define zig_mips64 +#define zig_mips +#elif defined(__mips__) +#define zig_mips32 
+#define zig_mips +#elif defined(__powerpc64__) +#define zig_powerpc64 +#define zig_powerpc +#elif defined(__powerpc__) +#define zig_powerpc32 +#define zig_powerpc +#elif defined(__riscv) && __riscv_xlen == 32 +#define zig_riscv32 +#define zig_riscv +#elif defined(__riscv) && __riscv_xlen == 64 +#define zig_riscv64 +#define zig_riscv +#elif defined(__s390x__) +#define zig_s390x +#elif defined(__sparc__) && defined(__arch64__) +#define zig_sparc64 +#define zig_sparc +#elif defined(__sparc__) +#define zig_sparc32 +#define zig_sparc +#elif defined(__wasm32__) +#define zig_wasm32 +#define zig_wasm +#elif defined(__wasm64__) +#define zig_wasm64 +#define zig_wasm +#elif defined(__i386__) || (defined(zig_msvc) && defined(_M_IX86)) +#define zig_x86_32 +#define zig_x86 +#elif defined (__x86_64__) || (defined(zig_msvc) && defined(_M_X64)) +#define zig_x86_64 +#define zig_x86 +#endif + +#if defined(zig_msvc) || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define zig_little_endian 1 +#define zig_big_endian 0 +#else +#define zig_little_endian 0 +#define zig_big_endian 1 +#endif + +#if defined(_AIX) +#define zig_aix +#elif defined(__MACH__) +#define zig_darwin +#elif defined(__DragonFly__) +#define zig_dragonfly +#define zig_bsd +#elif defined(__EMSCRIPTEN__) +#define zig_emscripten +#elif defined(__FreeBSD__) +#define zig_freebsd +#define zig_bsd +#elif defined(__Fuchsia__) +#define zig_fuchsia +#elif defined(__HAIKU__) +#define zig_haiku +#elif defined(__gnu_hurd__) +#define zig_hurd +#elif defined(__linux__) +#define zig_linux +#elif defined(__NetBSD__) +#define zig_netbsd +#define zig_bsd +#elif defined(__OpenBSD__) +#define zig_openbsd +#define zig_bsd +#elif defined(__SVR4) +#define zig_solaris +#elif defined(__wasi__) +#define zig_wasi +#elif defined(_WIN32) +#define zig_windows +#elif defined(__MVS__) +#define zig_zos +#endif + +#if defined(zig_windows) +#define zig_coff +#elif defined(__ELF__) +#define zig_elf +#elif defined(zig_zos) +#define zig_goff +#elif 
defined(zig_darwin) +#define zig_macho +#elif defined(zig_aix) +#define zig_xcoff +#endif + +#define zig_concat(lhs, rhs) lhs##rhs +#define zig_expand_concat(lhs, rhs) zig_concat(lhs, rhs) + +#if defined(__has_include) +#define zig_has_include(include) __has_include(include) +#else +#define zig_has_include(include) 0 +#endif + +#if defined(__has_builtin) +#define zig_has_builtin(builtin) __has_builtin(__builtin_##builtin) +#else +#define zig_has_builtin(builtin) 0 +#endif +#define zig_expand_has_builtin(b) zig_has_builtin(b) + +#if defined(__has_attribute) +#define zig_has_attribute(attribute) __has_attribute(attribute) +#else +#define zig_has_attribute(attribute) 0 +#endif + +#if __STDC_VERSION__ >= 202311L +#define zig_threadlocal thread_local +#elif __STDC_VERSION__ >= 201112L +#define zig_threadlocal _Thread_local +#elif defined(zig_gnuc) || defined(zig_slimcc) +#define zig_threadlocal __thread +#elif defined(zig_msvc) +#define zig_threadlocal __declspec(thread) +#else +#define zig_threadlocal zig_threadlocal_unavailable +#endif + +#if defined(zig_msvc) +#define zig_const_arr +#define zig_callconv(c) __##c +#else +#define zig_const_arr static const +#define zig_callconv(c) __attribute__((c)) +#endif + +#if zig_has_attribute(naked) || defined(zig_gcc) +#define zig_naked_decl __attribute__((naked)) +#define zig_naked __attribute__((naked)) +#elif defined(zig_msvc) +#define zig_naked_decl +#define zig_naked __declspec(naked) +#else +#define zig_naked_decl zig_naked_unavailable +#define zig_naked zig_naked_unavailable +#endif + +#if zig_has_attribute(cold) +#define zig_cold __attribute__((cold)) +#else +#define zig_cold +#endif + +#if zig_has_attribute(flatten) +#define zig_maybe_flatten __attribute__((flatten)) +#else +#define zig_maybe_flatten +#endif + +#if zig_has_attribute(noinline) +#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten +#elif defined(zig_msvc) +#define zig_never_inline __declspec(noinline) zig_maybe_flatten +#else +#define 
zig_never_inline zig_never_inline_unavailable +#endif + +#if zig_has_attribute(not_tail_called) +#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline +#else +#define zig_never_tail zig_never_tail_unavailable +#endif + +#if zig_has_attribute(musttail) +#define zig_always_tail __attribute__((musttail)) +#else +#define zig_always_tail zig_always_tail_unavailable +#endif + +#if __STDC_VERSION__ >= 199901L +#define zig_restrict restrict +#elif defined(zig_gnuc) || defined(zig_tinyc) +#define zig_restrict __restrict +#else +#define zig_restrict +#endif + +#if zig_has_attribute(no_builtin) +#define zig_no_builtin __attribute__((no_builtin)) +#else +#define zig_no_builtin +#endif + +#if zig_has_attribute(aligned) || defined(zig_tinyc) +#define zig_under_align(alignment) __attribute__((aligned(alignment))) +#elif defined(zig_msvc) +#define zig_under_align(alignment) __declspec(align(alignment)) +#else +#define zig_under_align zig_align_unavailable +#endif + +#if __STDC_VERSION__ >= 202311L +#define zig_align(alignment) alignas(alignment) +#elif __STDC_VERSION__ >= 201112L +#define zig_align(alignment) _Alignas(alignment) +#else +#define zig_align(alignment) zig_under_align(alignment) +#endif + +#if zig_has_attribute(aligned) || defined(zig_tinyc) +#define zig_align_fn(alignment) __attribute__((aligned(alignment))) +#elif defined(zig_msvc) +#define zig_align_fn(alignment) +#else +#define zig_align_fn zig_align_fn_unavailable +#endif + +#if zig_has_attribute(packed) || defined(zig_tinyc) +#define zig_packed(definition) __attribute__((packed)) definition +#elif defined(zig_msvc) +#define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack()) +#else +#define zig_packed(definition) zig_packed_unavailable +#endif + +#if zig_has_attribute(section) || defined(zig_tinyc) +#define zig_linksection(name) __attribute__((section(name))) +#define zig_linksection_fn zig_linksection +#elif defined(zig_msvc) +#define zig_linksection(name) 
__pragma(section(name, read, write)) __declspec(allocate(name)) +#define zig_linksection_fn(name) __pragma(section(name, read, execute)) __declspec(code_seg(name)) +#else +#define zig_linksection(name) zig_linksection_unavailable +#define zig_linksection_fn zig_linksection +#endif + +#if zig_has_builtin(unreachable) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_unreachable() __builtin_unreachable() +#elif defined(zig_msvc) +#define zig_unreachable() __assume(0) +#else +#define zig_unreachable() +#endif + +#if defined(__cplusplus) +#define zig_extern extern "C" +#else +#define zig_extern extern +#endif + +#if defined(zig_msvc) +#if defined(zig_x86_64) +#define zig_mangle_c(symbol) symbol +#else /* zig_x86_64 */ +#define zig_mangle_c(symbol) "_" symbol +#endif /* zig_x86_64 */ +#else /* zig_msvc */ +#if defined(zig_macho) +#define zig_mangle_c(symbol) "_" symbol +#else /* zig_macho */ +#define zig_mangle_c(symbol) symbol +#endif /* zig_macho */ +#endif /* zig_msvc */ + +#if defined(zig_msvc) +#define zig_export(symbol, name) ; \ + __pragma(comment(linker, "/alternatename:" zig_mangle_c(name) "=" zig_mangle_c(symbol))) +#elif (zig_has_attribute(alias) || defined(zig_tinyc)) && !defined(zig_macho) +#define zig_export(symbol, name) __attribute__((alias(symbol))) +#else +#define zig_export(symbol, name) ; \ + __asm(zig_mangle_c(name) " = " zig_mangle_c(symbol)) +#endif + +#define zig_mangled_tentative zig_mangled +#define zig_mangled_final zig_mangled +#if defined(zig_msvc) +#define zig_mangled(mangled, unmangled) ; \ + zig_export(#mangled, unmangled) +#define zig_mangled_export(mangled, unmangled, symbol) \ + zig_export(unmangled, #mangled) \ + zig_export(symbol, unmangled) +#else /* zig_msvc */ +#define zig_mangled(mangled, unmangled) __asm(zig_mangle_c(unmangled)) +#define zig_mangled_export(mangled, unmangled, symbol) \ + zig_mangled_final(mangled, unmangled) \ + zig_export(symbol, unmangled) +#endif /* zig_msvc */ + +#if defined(zig_msvc) +#define 
zig_import(Type, fn_name, libc_name, sig_args, call_args) zig_extern Type fn_name sig_args;\ + __pragma(comment(linker, "/alternatename:" zig_mangle_c(#fn_name) "=" zig_mangle_c(#libc_name))); +#define zig_import_builtin(Type, fn_name, libc_name, sig_args, call_args) zig_import(Type, fn_name, libc_name, sig_args, call_args) +#else /* zig_msvc */ +#define zig_import(Type, fn_name, libc_name, sig_args, call_args) zig_extern Type fn_name sig_args __asm(zig_mangle_c(#libc_name)); +#define zig_import_builtin(Type, fn_name, libc_name, sig_args, call_args) zig_extern Type libc_name sig_args; \ + static inline Type fn_name sig_args { return libc_name call_args; } +#endif + +#define zig_expand_import_0(Type, fn_name, libc_name, sig_args, call_args) zig_import(Type, fn_name, libc_name, sig_args, call_args) +#define zig_expand_import_1(Type, fn_name, libc_name, sig_args, call_args) zig_import_builtin(Type, fn_name, libc_name, sig_args, call_args) + +#if zig_has_attribute(weak) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_weak_linkage __attribute__((weak)) +#define zig_weak_linkage_fn __attribute__((weak)) +#elif defined(zig_msvc) +#define zig_weak_linkage __declspec(selectany) +#define zig_weak_linkage_fn +#else +#define zig_weak_linkage zig_weak_linkage_unavailable +#define zig_weak_linkage_fn zig_weak_linkage_unavailable +#endif + +#if defined(zig_gnuc) || defined(zig_tinyc) || defined(zig_slimcc) +#define zig_gnuc_asm +#endif + +#if zig_has_builtin(trap) +#define zig_trap() __builtin_trap() +#elif defined(zig_msvc) + +#if defined(zig_x86) +#define zig_trap() __ud2() +#else +#define zig_trap() __fastfail(7) +#endif + +#elif defined(zig_gnuc_asm) + +#if defined(zig_thumb) +#define zig_trap() __asm__ volatile("udf #0xfe") +#elif defined(zig_arm) || defined(zig_aarch64) +#define zig_trap() __asm__ volatile("udf #0xfdee") +#elif defined(zig_hexagon) +#define zig_trap() __asm__ volatile("r27:26 = memd(#0xbadc0fee)") +#elif defined(zig_loongarch) || defined(zig_powerpc) 
+#define zig_trap() __asm__ volatile(".word 0x0") +#elif defined(zig_mips) +#define zig_trap() __asm__ volatile(".word 0x3d") +#elif defined(zig_riscv) +#define zig_trap() __asm__ volatile("unimp") +#elif defined(zig_s390x) +#define zig_trap() __asm__ volatile("j 0x2") +#elif defined(zig_sparc) +#define zig_trap() __asm__ volatile("illtrap") +#elif defined(zig_x86) +#define zig_trap() __asm__ volatile("ud2") +#else +#define zig_trap() zig_trap_unavailable +#endif + +#else +#define zig_trap() zig_trap_unavailable +#endif + +#if zig_has_builtin(debugtrap) +#define zig_breakpoint() __builtin_debugtrap() +#elif defined(zig_msvc) +#define zig_breakpoint() __debugbreak() +#elif defined(zig_gnuc_asm) + +#if defined(zig_arm) +#define zig_breakpoint() __asm__ volatile("bkpt #0x0") +#elif defined(zig_aarch64) +#define zig_breakpoint() __asm__ volatile("brk #0xf000") +#elif defined(zig_hexagon) +#define zig_breakpoint() __asm__ volatile("brkpt") +#elif defined(zig_loongarch) +#define zig_breakpoint() __asm__ volatile("break 0x0") +#elif defined(zig_mips) +#define zig_breakpoint() __asm__ volatile("break") +#elif defined(zig_powerpc) +#define zig_breakpoint() __asm__ volatile("trap") +#elif defined(zig_riscv) +#define zig_breakpoint() __asm__ volatile("ebreak") +#elif defined(zig_s390x) +#define zig_breakpoint() __asm__ volatile("j 0x6") +#elif defined(zig_sparc) +#define zig_breakpoint() __asm__ volatile("ta 0x1") +#elif defined(zig_x86) +#define zig_breakpoint() __asm__ volatile("int $0x3") +#else +#define zig_breakpoint() zig_breakpoint_unavailable +#endif + +#else +#define zig_breakpoint() zig_breakpoint_unavailable +#endif + +#if zig_has_builtin(return_address) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_return_address() __builtin_extract_return_addr(__builtin_return_address(0)) +#elif defined(zig_msvc) +#define zig_return_address() _ReturnAddress() +#else +#define zig_return_address() 0 +#endif + +#if zig_has_builtin(frame_address) || defined(zig_gcc) || 
defined(zig_tinyc) +#define zig_frame_address() __builtin_frame_address(0) +#elif defined(zig_msvc) +#define zig_frame_address() _AddressOfReturnAddress() +#else +#define zig_frame_address() 0 +#endif + +#if zig_has_builtin(prefetch) || defined(zig_gcc) +#define zig_prefetch(addr, rw, locality) __builtin_prefetch(addr, rw, locality) +#else +#define zig_prefetch(addr, rw, locality) +#endif + +#if zig_has_builtin(memory_size) && zig_has_builtin(memory_grow) +#define zig_wasm_memory_size(index) __builtin_wasm_memory_size(index) +#define zig_wasm_memory_grow(index, delta) __builtin_wasm_memory_grow(index, delta) +#else +#define zig_wasm_memory_size(index) zig_unimplemented() +#define zig_wasm_memory_grow(index, delta) zig_unimplemented() +#endif + +#if __STDC_VERSION__ >= 202311L +#define zig_noreturn [[noreturn]] +#elif __STDC_VERSION__ >= 201112L +#define zig_noreturn _Noreturn +#elif zig_has_attribute(noreturn) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_noreturn __attribute__((noreturn)) +#elif defined(zig_msvc) +#define zig_noreturn __declspec(noreturn) +#else +#define zig_noreturn +#endif + +#define zig_compiler_rt_abbrev_uint32_t si +#define zig_compiler_rt_abbrev_int32_t si +#define zig_compiler_rt_abbrev_uint64_t di +#define zig_compiler_rt_abbrev_int64_t di +#define zig_compiler_rt_abbrev_zig_u128 ti +#define zig_compiler_rt_abbrev_zig_i128 ti +#define zig_compiler_rt_abbrev_zig_f16 hf +#define zig_compiler_rt_abbrev_zig_f32 sf +#define zig_compiler_rt_abbrev_zig_f64 df +#define zig_compiler_rt_abbrev_zig_f80 xf +#define zig_compiler_rt_abbrev_zig_f128 tf + +zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t); +zig_extern void *memset (void *, int, size_t); + +/* ================ Bool and 8/16/32/64-bit Integer Support ================= */ + +#include + +#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) + +#if __STDC_VERSION__ >= 202311L +/* bool, true, and false are provided by the language. 
*/ +#elif __STDC_VERSION__ >= 199901L || zig_has_include() +#include +#else +typedef char bool; +#define false 0 +#define true 1 +#endif + +#if __STDC_VERSION__ >= 199901L || defined(zig_msvc) || zig_has_include() +#include +#else +#if SCHAR_MIN == ~0x7F && SCHAR_MAX == 0x7F && UCHAR_MAX == 0xFF +typedef unsigned char uint8_t; +typedef signed char int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif SHRT_MIN == ~0x7F && SHRT_MAX == 0x7F && USHRT_MAX == 0xFF +typedef unsigned short uint8_t; +typedef signed short int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif INT_MIN == ~0x7F && INT_MAX == 0x7F && UINT_MAX == 0xFF +typedef unsigned int uint8_t; +typedef signed int int8_t; +#define INT8_C(c) c +#define UINT8_C(c) c##U +#elif LONG_MIN == ~0x7F && LONG_MAX == 0x7F && ULONG_MAX == 0xFF +typedef unsigned long uint8_t; +typedef signed long int8_t; +#define INT8_C(c) c##L +#define UINT8_C(c) c##LU +#elif LLONG_MIN == ~0x7F && LLONG_MAX == 0x7F && ULLONG_MAX == 0xFF +typedef unsigned long long uint8_t; +typedef signed long long int8_t; +#define INT8_C(c) c##LL +#define UINT8_C(c) c##LLU +#endif +#define INT8_MIN (~INT8_C(0x7F)) +#define INT8_MAX ( INT8_C(0x7F)) +#define UINT8_MAX ( INT8_C(0xFF)) + +#if SCHAR_MIN == ~0x7FFF && SCHAR_MAX == 0x7FFF && UCHAR_MAX == 0xFFFF +typedef unsigned char uint16_t; +typedef signed char int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif SHRT_MIN == ~0x7FFF && SHRT_MAX == 0x7FFF && USHRT_MAX == 0xFFFF +typedef unsigned short uint16_t; +typedef signed short int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif INT_MIN == ~0x7FFF && INT_MAX == 0x7FFF && UINT_MAX == 0xFFFF +typedef unsigned int uint16_t; +typedef signed int int16_t; +#define INT16_C(c) c +#define UINT16_C(c) c##U +#elif LONG_MIN == ~0x7FFF && LONG_MAX == 0x7FFF && ULONG_MAX == 0xFFFF +typedef unsigned long uint16_t; +typedef signed long int16_t; +#define INT16_C(c) c##L +#define UINT16_C(c) c##LU +#elif LLONG_MIN == ~0x7FFF && 
LLONG_MAX == 0x7FFF && ULLONG_MAX == 0xFFFF +typedef unsigned long long uint16_t; +typedef signed long long int16_t; +#define INT16_C(c) c##LL +#define UINT16_C(c) c##LLU +#endif +#define INT16_MIN (~INT16_C(0x7FFF)) +#define INT16_MAX ( INT16_C(0x7FFF)) +#define UINT16_MAX ( INT16_C(0xFFFF)) + +#if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF +typedef unsigned char uint32_t; +typedef signed char int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif SHRT_MIN == ~0x7FFFFFFF && SHRT_MAX == 0x7FFFFFFF && USHRT_MAX == 0xFFFFFFFF +typedef unsigned short uint32_t; +typedef signed short int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif INT_MIN == ~0x7FFFFFFF && INT_MAX == 0x7FFFFFFF && UINT_MAX == 0xFFFFFFFF +typedef unsigned int uint32_t; +typedef signed int int32_t; +#define INT32_C(c) c +#define UINT32_C(c) c##U +#elif LONG_MIN == ~0x7FFFFFFF && LONG_MAX == 0x7FFFFFFF && ULONG_MAX == 0xFFFFFFFF +typedef unsigned long uint32_t; +typedef signed long int32_t; +#define INT32_C(c) c##L +#define UINT32_C(c) c##LU +#elif LLONG_MIN == ~0x7FFFFFFF && LLONG_MAX == 0x7FFFFFFF && ULLONG_MAX == 0xFFFFFFFF +typedef unsigned long long uint32_t; +typedef signed long long int32_t; +#define INT32_C(c) c##LL +#define UINT32_C(c) c##LLU +#endif +#define INT32_MIN (~INT32_C(0x7FFFFFFF)) +#define INT32_MAX ( INT32_C(0x7FFFFFFF)) +#define UINT32_MAX ( INT32_C(0xFFFFFFFF)) + +#if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned char uint64_t; +typedef signed char int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif SHRT_MIN == ~0x7FFFFFFFFFFFFFFF && SHRT_MAX == 0x7FFFFFFFFFFFFFFF && USHRT_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned short uint64_t; +typedef signed short int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif INT_MIN == ~0x7FFFFFFFFFFFFFFF && INT_MAX == 0x7FFFFFFFFFFFFFFF && UINT_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned int 
uint64_t; +typedef signed int int64_t; +#define INT64_C(c) c +#define UINT64_C(c) c##U +#elif LONG_MIN == ~0x7FFFFFFFFFFFFFFF && LONG_MAX == 0x7FFFFFFFFFFFFFFF && ULONG_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned long uint64_t; +typedef signed long int64_t; +#define INT64_C(c) c##L +#define UINT64_C(c) c##LU +#elif LLONG_MIN == ~0x7FFFFFFFFFFFFFFF && LLONG_MAX == 0x7FFFFFFFFFFFFFFF && ULLONG_MAX == 0xFFFFFFFFFFFFFFFF +typedef unsigned long long uint64_t; +typedef signed long long int64_t; +#define INT64_C(c) c##LL +#define UINT64_C(c) c##LLU +#endif +#define INT64_MIN (~INT64_C(0x7FFFFFFFFFFFFFFF)) +#define INT64_MAX ( INT64_C(0x7FFFFFFFFFFFFFFF)) +#define UINT64_MAX ( INT64_C(0xFFFFFFFFFFFFFFFF)) + +typedef size_t uintptr_t; +typedef ptrdiff_t intptr_t; + +#endif + +#define zig_minInt_i8 INT8_MIN +#define zig_maxInt_i8 INT8_MAX +#define zig_minInt_u8 UINT8_C(0) +#define zig_maxInt_u8 UINT8_MAX +#define zig_minInt_i16 INT16_MIN +#define zig_maxInt_i16 INT16_MAX +#define zig_minInt_u16 UINT16_C(0) +#define zig_maxInt_u16 UINT16_MAX +#define zig_minInt_i32 INT32_MIN +#define zig_maxInt_i32 INT32_MAX +#define zig_minInt_u32 UINT32_C(0) +#define zig_maxInt_u32 UINT32_MAX +#define zig_minInt_i64 INT64_MIN +#define zig_maxInt_i64 INT64_MAX +#define zig_minInt_u64 UINT64_C(0) +#define zig_maxInt_u64 UINT64_MAX + +#define zig_intLimit(s, w, limit, bits) zig_shr_##s##w(zig_##limit##Int_##s##w, w - (bits)) +#define zig_minInt_i(w, bits) zig_intLimit(i, w, min, bits) +#define zig_maxInt_i(w, bits) zig_intLimit(i, w, max, bits) +#define zig_minInt_u(w, bits) zig_intLimit(u, w, min, bits) +#define zig_maxInt_u(w, bits) zig_intLimit(u, w, max, bits) + +#define zig_operator(Type, RhsType, operation, operator) \ + static inline Type zig_##operation(Type lhs, RhsType rhs) { \ + return lhs operator rhs; \ + } +#define zig_basic_operator(Type, operation, operator) \ + zig_operator(Type, Type, operation, operator) +#define zig_shift_operator(Type, operation, operator) \ + 
zig_operator(Type, uint8_t, operation, operator) +#define zig_int_helpers(w, PromotedUnsigned) \ + zig_basic_operator(uint##w##_t, and_u##w, &) \ + zig_basic_operator( int##w##_t, and_i##w, &) \ + zig_basic_operator(uint##w##_t, or_u##w, |) \ + zig_basic_operator( int##w##_t, or_i##w, |) \ + zig_basic_operator(uint##w##_t, xor_u##w, ^) \ + zig_basic_operator( int##w##_t, xor_i##w, ^) \ + zig_shift_operator(uint##w##_t, shl_u##w, <<) \ + zig_shift_operator( int##w##_t, shl_i##w, <<) \ + zig_shift_operator(uint##w##_t, shr_u##w, >>) \ +\ + static inline int##w##_t zig_shr_i##w(int##w##_t lhs, uint8_t rhs) { \ + int##w##_t sign_mask = lhs < INT##w##_C(0) ? -INT##w##_C(1) : INT##w##_C(0); \ + return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \ + } \ +\ + static inline uint##w##_t zig_not_u##w(uint##w##_t val, uint8_t bits) { \ + return val ^ zig_maxInt_u(w, bits); \ + } \ +\ + static inline int##w##_t zig_not_i##w(int##w##_t val, uint8_t bits) { \ + (void)bits; \ + return ~val; \ + } \ +\ + static inline uint##w##_t zig_wrap_u##w(uint##w##_t val, uint8_t bits) { \ + return val & zig_maxInt_u(w, bits); \ + } \ +\ + static inline int##w##_t zig_wrap_i##w(int##w##_t val, uint8_t bits) { \ + return (val & UINT##w##_C(1) << (bits - UINT8_C(1))) != 0 \ + ? val | zig_minInt_i(w, bits) : val & zig_maxInt_i(w, bits); \ + } \ +\ + static inline uint##w##_t zig_abs_i##w(int##w##_t val) { \ + return (val < 0) ? -(uint##w##_t)val : (uint##w##_t)val; \ + } \ +\ + zig_basic_operator(uint##w##_t, div_floor_u##w, /) \ +\ + static inline int##w##_t zig_div_floor_i##w(int##w##_t lhs, int##w##_t rhs) { \ + return lhs / rhs + (lhs % rhs != INT##w##_C(0) ? zig_shr_i##w(lhs ^ rhs, UINT8_C(w) - UINT8_C(1)) : INT##w##_C(0)); \ + } \ +\ + zig_basic_operator(uint##w##_t, mod_u##w, %) \ +\ + static inline int##w##_t zig_mod_i##w(int##w##_t lhs, int##w##_t rhs) { \ + int##w##_t rem = lhs % rhs; \ + return rem + (rem != INT##w##_C(0) ? 
rhs & zig_shr_i##w(lhs ^ rhs, UINT8_C(w) - UINT8_C(1)) : INT##w##_C(0)); \ + } \ +\ + static inline uint##w##_t zig_shlw_u##w(uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ + return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \ + } \ +\ + static inline int##w##_t zig_shlw_i##w(int##w##_t lhs, uint8_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)zig_shl_u##w((uint##w##_t)lhs, rhs), bits); \ + } \ +\ + static inline uint##w##_t zig_addw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + return zig_wrap_u##w(lhs + rhs, bits); \ + } \ +\ + static inline int##w##_t zig_addw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs + (uint##w##_t)rhs), bits); \ + } \ +\ + static inline uint##w##_t zig_subw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + return zig_wrap_u##w(lhs - rhs, bits); \ + } \ +\ + static inline int##w##_t zig_subw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs - (uint##w##_t)rhs), bits); \ + } \ +\ + static inline uint##w##_t zig_mulw_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + return zig_wrap_u##w((PromotedUnsigned)lhs * rhs, bits); \ + } \ +\ + static inline int##w##_t zig_mulw_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + return zig_wrap_i##w((int##w##_t)((uint##w##_t)lhs * (uint##w##_t)rhs), bits); \ + } +#if UINT8_MAX <= UINT_MAX +zig_int_helpers(8, unsigned int) +#elif UINT8_MAX <= ULONG_MAX +zig_int_helpers(8, unsigned long) +#elif UINT8_MAX <= ULLONG_MAX +zig_int_helpers(8, unsigned long long) +#else +zig_int_helpers(8, uint8_t) +#endif +#if UINT16_MAX <= UINT_MAX +zig_int_helpers(16, unsigned int) +#elif UINT16_MAX <= ULONG_MAX +zig_int_helpers(16, unsigned long) +#elif UINT16_MAX <= ULLONG_MAX +zig_int_helpers(16, unsigned long long) +#else +zig_int_helpers(16, uint16_t) +#endif +#if UINT32_MAX <= UINT_MAX +zig_int_helpers(32, unsigned int) +#elif UINT32_MAX <= 
ULONG_MAX +zig_int_helpers(32, unsigned long) +#elif UINT32_MAX <= ULLONG_MAX +zig_int_helpers(32, unsigned long long) +#else +zig_int_helpers(32, uint32_t) +#endif +#if UINT64_MAX <= UINT_MAX +zig_int_helpers(64, unsigned int) +#elif UINT64_MAX <= ULONG_MAX +zig_int_helpers(64, unsigned long) +#elif UINT64_MAX <= ULLONG_MAX +zig_int_helpers(64, unsigned long long) +#else +zig_int_helpers(64, uint64_t) +#endif + +static inline bool zig_addo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + uint32_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); +#else + *res = zig_addw_u32(lhs, rhs, bits); + return *res < lhs; +#endif +} + +zig_extern int32_t __addosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_addo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + int32_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + int32_t full_res = __addosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); +} + +static inline bool zig_addo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + uint64_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); +#else + *res = zig_addw_u64(lhs, rhs, bits); + return *res < lhs; +#endif +} + +zig_extern int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_addo_i64(int64_t *res, int64_t lhs, 
int64_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + int64_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + int64_t full_res = __addodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); +} + +static inline bool zig_addo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + uint8_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); +#else + uint32_t full_res; + bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); + *res = (uint8_t)full_res; + return overflow; +#endif +} + +static inline bool zig_addo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + int8_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); +#else + int32_t full_res; + bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); + *res = (int8_t)full_res; + return overflow; +#endif +} + +static inline bool zig_addo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + uint16_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); +#else + uint32_t full_res; + bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits); + *res = (uint16_t)full_res; + return overflow; +#endif +} + +static inline bool zig_addo_i16(int16_t *res, int16_t 
lhs, int16_t rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) || defined(zig_gcc) + int16_t full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); +#else + int32_t full_res; + bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits); + *res = (int16_t)full_res; + return overflow; +#endif +} + +static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + uint32_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); +#else + *res = zig_subw_u32(lhs, rhs, bits); + return *res > lhs; +#endif +} + +zig_extern int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + int32_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + int32_t full_res = __subosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); +} + +static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + uint64_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); +#else + *res = zig_subw_u64(lhs, rhs, bits); + return *res > lhs; +#endif +} + +zig_extern int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool 
zig_subo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + int64_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + int64_t full_res = __subodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); +} + +static inline bool zig_subo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + uint8_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); +#else + uint32_t full_res; + bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); + *res = (uint8_t)full_res; + return overflow; +#endif +} + +static inline bool zig_subo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + int8_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); +#else + int32_t full_res; + bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); + *res = (int8_t)full_res; + return overflow; +#endif +} + +static inline bool zig_subo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + uint16_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); +#else + uint32_t full_res; + bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits); + *res = (uint16_t)full_res; + return overflow; +#endif +} + +static inline 
bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) || defined(zig_gcc) + int16_t full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); +#else + int32_t full_res; + bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits); + *res = (int16_t)full_res; + return overflow; +#endif +} + +static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + uint32_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u32(full_res, bits); + return overflow || full_res < zig_minInt_u(32, bits) || full_res > zig_maxInt_u(32, bits); +#else + *res = zig_mulw_u32(lhs, rhs, bits); + return rhs != UINT32_C(0) && lhs > zig_maxInt_u(32, bits) / rhs; +#endif +} + +zig_extern int32_t __mulosi4(int32_t lhs, int32_t rhs, int *overflow); +static inline bool zig_mulo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + int32_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + int32_t full_res = __mulosi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i32(full_res, bits); + return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits); +} + +static inline bool zig_mulo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + uint64_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u64(full_res, bits); + return overflow || full_res < zig_minInt_u(64, bits) || full_res > zig_maxInt_u(64, bits); +#else + *res = zig_mulw_u64(lhs, rhs, bits); + return rhs != UINT64_C(0) && lhs > 
zig_maxInt_u(64, bits) / rhs; +#endif +} + +zig_extern int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow); +static inline bool zig_mulo_i64(int64_t *res, int64_t lhs, int64_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + int64_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + int64_t full_res = __mulodi4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i64(full_res, bits); + return overflow || full_res < zig_minInt_i(64, bits) || full_res > zig_maxInt_i(64, bits); +} + +static inline bool zig_mulo_u8(uint8_t *res, uint8_t lhs, uint8_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + uint8_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u8(full_res, bits); + return overflow || full_res < zig_minInt_u(8, bits) || full_res > zig_maxInt_u(8, bits); +#else + uint32_t full_res; + bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); + *res = (uint8_t)full_res; + return overflow; +#endif +} + +static inline bool zig_mulo_i8(int8_t *res, int8_t lhs, int8_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + int8_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i8(full_res, bits); + return overflow || full_res < zig_minInt_i(8, bits) || full_res > zig_maxInt_i(8, bits); +#else + int32_t full_res; + bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); + *res = (int8_t)full_res; + return overflow; +#endif +} + +static inline bool zig_mulo_u16(uint16_t *res, uint16_t lhs, uint16_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + uint16_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u16(full_res, bits); + return overflow || full_res < zig_minInt_u(16, bits) || full_res > zig_maxInt_u(16, bits); +#else + uint32_t full_res; + 
bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits); + *res = (uint16_t)full_res; + return overflow; +#endif +} + +static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) || defined(zig_gcc) + int16_t full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_i16(full_res, bits); + return overflow || full_res < zig_minInt_i(16, bits) || full_res > zig_maxInt_i(16, bits); +#else + int32_t full_res; + bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits); + *res = (int16_t)full_res; + return overflow; +#endif +} + +#define zig_int_builtins(w) \ + static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \ + *res = zig_shlw_u##w(lhs, rhs, bits); \ + return lhs > zig_maxInt_u(w, bits) >> rhs; \ + } \ +\ + static inline bool zig_shlo_i##w(int##w##_t *res, int##w##_t lhs, uint8_t rhs, uint8_t bits) { \ + *res = zig_shlw_i##w(lhs, rhs, bits); \ + int##w##_t mask = (int##w##_t)(UINT##w##_MAX << (bits - rhs - 1)); \ + return (lhs & mask) != INT##w##_C(0) && (lhs & mask) != mask; \ + } \ +\ + static inline uint##w##_t zig_shls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + if (rhs >= bits) return lhs != UINT##w##_C(0) ? zig_maxInt_u(w, bits) : lhs; \ + return zig_shlo_u##w(&res, lhs, (uint8_t)rhs, bits) ? zig_maxInt_u(w, bits) : res; \ + } \ +\ + static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ + if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \ + return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ + } \ +\ + static inline uint##w##_t zig_adds_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_addo_u##w(&res, lhs, rhs, bits) ? 
zig_maxInt_u(w, bits) : res; \ + } \ +\ + static inline int##w##_t zig_adds_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ + if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \ + return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ + } \ +\ + static inline uint##w##_t zig_subs_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt_u(w, bits) : res; \ + } \ +\ + static inline int##w##_t zig_subs_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ + if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \ + return res >= INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ + } \ +\ + static inline uint##w##_t zig_muls_u##w(uint##w##_t lhs, uint##w##_t rhs, uint8_t bits) { \ + uint##w##_t res; \ + return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt_u(w, bits) : res; \ + } \ +\ + static inline int##w##_t zig_muls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \ + int##w##_t res; \ + if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \ + return (lhs ^ rhs) < INT##w##_C(0) ? 
zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \ + } +zig_int_builtins(8) +zig_int_builtins(16) +zig_int_builtins(32) +zig_int_builtins(64) + +#define zig_builtin8(name, val) __builtin_##name(val) +typedef unsigned int zig_Builtin8; + +#define zig_builtin16(name, val) __builtin_##name(val) +typedef unsigned int zig_Builtin16; + +#if INT_MIN <= INT32_MIN +#define zig_builtin32(name, val) __builtin_##name(val) +typedef unsigned int zig_Builtin32; +#elif LONG_MIN <= INT32_MIN +#define zig_builtin32(name, val) __builtin_##name##l(val) +typedef unsigned long zig_Builtin32; +#endif + +#if INT_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name(val) +typedef unsigned int zig_Builtin64; +#elif LONG_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name##l(val) +typedef unsigned long zig_Builtin64; +#elif LLONG_MIN <= INT64_MIN +#define zig_builtin64(name, val) __builtin_##name##ll(val) +typedef unsigned long long zig_Builtin64; +#endif + +static inline uint8_t zig_byte_swap_u8(uint8_t val, uint8_t bits) { + return zig_wrap_u8(val >> (8 - bits), bits); +} + +static inline int8_t zig_byte_swap_i8(int8_t val, uint8_t bits) { + return zig_wrap_i8((int8_t)zig_byte_swap_u8((uint8_t)val, bits), bits); +} + +static inline uint16_t zig_byte_swap_u16(uint16_t val, uint8_t bits) { + uint16_t full_res; +#if zig_has_builtin(bswap16) || defined(zig_gcc) + full_res = __builtin_bswap16(val); +#else + full_res = (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 8 | + (uint16_t)zig_byte_swap_u8((uint8_t)(val >> 8), 8) >> 0; +#endif + return zig_wrap_u16(full_res >> (16 - bits), bits); +} + +static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) { + return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits); +} + +static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) { + uint32_t full_res; +#if zig_has_builtin(bswap32) || defined(zig_gcc) + full_res = __builtin_bswap32(val); +#else + full_res = 
(uint32_t)zig_byte_swap_u16((uint16_t)(val >> 0), 16) << 16 | + (uint32_t)zig_byte_swap_u16((uint16_t)(val >> 16), 16) >> 0; +#endif + return zig_wrap_u32(full_res >> (32 - bits), bits); +} + +static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) { + return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits); +} + +static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) { + uint64_t full_res; +#if zig_has_builtin(bswap64) || defined(zig_gcc) + full_res = __builtin_bswap64(val); +#else + full_res = (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 0), 32) << 32 | + (uint64_t)zig_byte_swap_u32((uint32_t)(val >> 32), 32) >> 0; +#endif + return zig_wrap_u64(full_res >> (64 - bits), bits); +} + +static inline int64_t zig_byte_swap_i64(int64_t val, uint8_t bits) { + return zig_wrap_i64((int64_t)zig_byte_swap_u64((uint64_t)val, bits), bits); +} + +static inline uint8_t zig_bit_reverse_u8(uint8_t val, uint8_t bits) { + uint8_t full_res; +#if zig_has_builtin(bitreverse8) + full_res = __builtin_bitreverse8(val); +#else + static uint8_t const lut[0x10] = { + 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, + 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf + }; + full_res = lut[val >> 0 & 0xF] << 4 | lut[val >> 4 & 0xF] << 0; +#endif + return zig_wrap_u8(full_res >> (8 - bits), bits); +} + +static inline int8_t zig_bit_reverse_i8(int8_t val, uint8_t bits) { + return zig_wrap_i8((int8_t)zig_bit_reverse_u8((uint8_t)val, bits), bits); +} + +static inline uint16_t zig_bit_reverse_u16(uint16_t val, uint8_t bits) { + uint16_t full_res; +#if zig_has_builtin(bitreverse16) + full_res = __builtin_bitreverse16(val); +#else + full_res = (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 8 | + (uint16_t)zig_bit_reverse_u8((uint8_t)(val >> 8), 8) >> 0; +#endif + return zig_wrap_u16(full_res >> (16 - bits), bits); +} + +static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) { + return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), 
bits); +} + +static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) { + uint32_t full_res; +#if zig_has_builtin(bitreverse32) + full_res = __builtin_bitreverse32(val); +#else + full_res = (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 0), 16) << 16 | + (uint32_t)zig_bit_reverse_u16((uint16_t)(val >> 16), 16) >> 0; +#endif + return zig_wrap_u32(full_res >> (32 - bits), bits); +} + +static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) { + return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits); +} + +static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) { + uint64_t full_res; +#if zig_has_builtin(bitreverse64) + full_res = __builtin_bitreverse64(val); +#else + full_res = (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 0), 32) << 32 | + (uint64_t)zig_bit_reverse_u32((uint32_t)(val >> 32), 32) >> 0; +#endif + return zig_wrap_u64(full_res >> (64 - bits), bits); +} + +static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) { + return zig_wrap_i64((int64_t)zig_bit_reverse_u64((uint64_t)val, bits), bits); +} + +#define zig_builtin_popcount_common(w) \ + static inline uint8_t zig_popcount_i##w(int##w##_t val, uint8_t bits) { \ + return zig_popcount_u##w((uint##w##_t)val, bits); \ + } +#if zig_has_builtin(popcount) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_builtin_popcount(w) \ + static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \ + (void)bits; \ + return zig_builtin##w(popcount, val); \ + } \ +\ + zig_builtin_popcount_common(w) +#else +#define zig_builtin_popcount(w) \ + static inline uint8_t zig_popcount_u##w(uint##w##_t val, uint8_t bits) { \ + (void)bits; \ + uint##w##_t temp = val - ((val >> 1) & (UINT##w##_MAX / 3)); \ + temp = (temp & (UINT##w##_MAX / 5)) + ((temp >> 2) & (UINT##w##_MAX / 5)); \ + temp = (temp + (temp >> 4)) & (UINT##w##_MAX / 17); \ + return temp * (UINT##w##_MAX / 255) >> (UINT8_C(w) - UINT8_C(8)); \ + } \ +\ + 
zig_builtin_popcount_common(w) +#endif +zig_builtin_popcount(8) +zig_builtin_popcount(16) +zig_builtin_popcount(32) +zig_builtin_popcount(64) + +#define zig_builtin_ctz_common(w) \ + static inline uint8_t zig_ctz_i##w(int##w##_t val, uint8_t bits) { \ + return zig_ctz_u##w((uint##w##_t)val, bits); \ + } +#if zig_has_builtin(ctz) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_builtin_ctz(w) \ + static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \ + if (val == 0) return bits; \ + return zig_builtin##w(ctz, val); \ + } \ +\ + zig_builtin_ctz_common(w) +#else +#define zig_builtin_ctz(w) \ + static inline uint8_t zig_ctz_u##w(uint##w##_t val, uint8_t bits) { \ + return zig_popcount_u##w(zig_not_u##w(val, bits) & zig_subw_u##w(val, 1, bits), bits); \ + } \ +\ + zig_builtin_ctz_common(w) +#endif +zig_builtin_ctz(8) +zig_builtin_ctz(16) +zig_builtin_ctz(32) +zig_builtin_ctz(64) + +#define zig_builtin_clz_common(w) \ + static inline uint8_t zig_clz_i##w(int##w##_t val, uint8_t bits) { \ + return zig_clz_u##w((uint##w##_t)val, bits); \ + } +#if zig_has_builtin(clz) || defined(zig_gcc) || defined(zig_tinyc) +#define zig_builtin_clz(w) \ + static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \ + if (val == 0) return bits; \ + return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \ + } \ +\ + zig_builtin_clz_common(w) +#else +#define zig_builtin_clz(w) \ + static inline uint8_t zig_clz_u##w(uint##w##_t val, uint8_t bits) { \ + return zig_ctz_u##w(zig_bit_reverse_u##w(val, bits), bits); \ + } \ +\ + zig_builtin_clz_common(w) +#endif +zig_builtin_clz(8) +zig_builtin_clz(16) +zig_builtin_clz(32) +zig_builtin_clz(64) + +/* ======================== 128-bit Integer Support ========================= */ + +#if !defined(zig_has_int128) +# if defined(__SIZEOF_INT128__) +# define zig_has_int128 1 +# else +# define zig_has_int128 0 +# endif +#endif + +#if zig_has_int128 + +typedef unsigned __int128 zig_u128; +typedef signed 
__int128 zig_i128; + +#define zig_make_u128(hi, lo) ((zig_u128)(hi)<<64|(lo)) +#define zig_make_i128(hi, lo) ((zig_i128)zig_make_u128(hi, lo)) +#define zig_init_u128(hi, lo) zig_make_u128(hi, lo) +#define zig_init_i128(hi, lo) zig_make_i128(hi, lo) +#define zig_hi_u128(val) ((uint64_t)((val) >> 64)) +#define zig_lo_u128(val) ((uint64_t)((val) >> 0)) +#define zig_hi_i128(val) (( int64_t)((val) >> 64)) +#define zig_lo_i128(val) ((uint64_t)((val) >> 0)) +#define zig_bitCast_u128(val) ((zig_u128)(val)) +#define zig_bitCast_i128(val) ((zig_i128)(val)) +#define zig_cmp_int128(Type) \ + static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (lhs > rhs) - (lhs < rhs); \ + } +#define zig_bit_int128(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return lhs operator rhs; \ + } + +#else /* zig_has_int128 */ + +#if zig_little_endian +typedef struct { zig_align(16) uint64_t lo; uint64_t hi; } zig_u128; +typedef struct { zig_align(16) uint64_t lo; int64_t hi; } zig_i128; +#else +typedef struct { zig_align(16) uint64_t hi; uint64_t lo; } zig_u128; +typedef struct { zig_align(16) int64_t hi; uint64_t lo; } zig_i128; +#endif + +#define zig_make_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) }) +#define zig_make_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) }) + +#if defined(zig_msvc) /* MSVC doesn't allow struct literals in constant expressions */ +#define zig_init_u128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#define zig_init_i128(hi, lo) { .h##i = (hi), .l##o = (lo) } +#else /* But non-MSVC doesn't like the unprotected commas */ +#define zig_init_u128(hi, lo) zig_make_u128(hi, lo) +#define zig_init_i128(hi, lo) zig_make_i128(hi, lo) +#endif +#define zig_hi_u128(val) ((val).hi) +#define zig_lo_u128(val) ((val).lo) +#define zig_hi_i128(val) ((val).hi) +#define zig_lo_i128(val) ((val).lo) +#define zig_bitCast_u128(val) zig_make_u128((uint64_t)(val).hi, (val).lo) +#define 
zig_bitCast_i128(val) zig_make_i128(( int64_t)(val).hi, (val).lo) +#define zig_cmp_int128(Type) \ + static inline int32_t zig_cmp_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (lhs.hi == rhs.hi) \ + ? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \ + : (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \ + } +#define zig_bit_int128(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (zig_##Type){ .hi = lhs.hi operator rhs.hi, .lo = lhs.lo operator rhs.lo }; \ + } + +#endif /* zig_has_int128 */ + +#define zig_minInt_u128 zig_make_u128(zig_minInt_u64, zig_minInt_u64) +#define zig_maxInt_u128 zig_make_u128(zig_maxInt_u64, zig_maxInt_u64) +#define zig_minInt_i128 zig_make_i128(zig_minInt_i64, zig_minInt_u64) +#define zig_maxInt_i128 zig_make_i128(zig_maxInt_i64, zig_maxInt_u64) + +zig_cmp_int128(u128) +zig_cmp_int128(i128) + +zig_bit_int128(u128, and, &) +zig_bit_int128(i128, and, &) + +zig_bit_int128(u128, or, |) +zig_bit_int128(i128, or, |) + +zig_bit_int128(u128, xor, ^) +zig_bit_int128(i128, xor, ^) + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs); + +#if zig_has_int128 + +static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { + return val ^ zig_maxInt_u(128, bits); +} + +static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) { + (void)bits; + return ~val; +} + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) { + return lhs >> rhs; +} + +static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) { + return lhs << rhs; +} + +static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) { + zig_i128 sign_mask = lhs < zig_make_i128(0, 0) ? 
-zig_make_i128(0, 1) : zig_make_i128(0, 0); + return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) { + return lhs << rhs; +} + +static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs + rhs; +} + +static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs + rhs; +} + +static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs - rhs; +} + +static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs - rhs; +} + +static inline zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs * rhs; +} + +static inline zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs * rhs; +} + +static inline zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs / rhs; +} + +static inline zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs / rhs; +} + +static inline zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return lhs % rhs; +} + +static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return lhs % rhs; +} + +#else /* zig_has_int128 */ + +static inline zig_u128 zig_not_u128(zig_u128 val, uint8_t bits) { + return (zig_u128){ .hi = zig_not_u64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) }; +} + +static inline zig_i128 zig_not_i128(zig_i128 val, uint8_t bits) { + return (zig_i128){ .hi = zig_not_i64(val.hi, bits - UINT8_C(64)), .lo = zig_not_u64(val.lo, UINT8_C(64)) }; +} + +static inline zig_u128 zig_shr_u128(zig_u128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - UINT8_C(64)) }; + return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (UINT8_C(64) - rhs) | lhs.lo >> rhs }; +} + +static inline zig_u128 zig_shl_u128(zig_u128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_u128){ .hi = lhs.lo << (rhs - 
UINT8_C(64)), .lo = zig_minInt_u64 }; + return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs }; +} + +static inline zig_i128 zig_shr_i128(zig_i128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = zig_shr_i64(lhs.hi, 63), .lo = zig_shr_i64(lhs.hi, (rhs - UINT8_C(64))) }; + return (zig_i128){ .hi = zig_shr_i64(lhs.hi, rhs), .lo = lhs.lo >> rhs | (uint64_t)lhs.hi << (UINT8_C(64) - rhs) }; +} + +static inline zig_i128 zig_shl_i128(zig_i128 lhs, uint8_t rhs) { + if (rhs == UINT8_C(0)) return lhs; + if (rhs >= UINT8_C(64)) return (zig_i128){ .hi = lhs.lo << (rhs - UINT8_C(64)), .lo = zig_minInt_u64 }; + return (zig_i128){ .hi = lhs.hi << rhs | lhs.lo >> (UINT8_C(64) - rhs), .lo = lhs.lo << rhs }; +} + +static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u128 res; + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 res; + res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) { + zig_u128 res; + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 res; + res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, 64); + return res; +} + +zig_extern zig_i128 __multi3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) { + return __multi3(lhs, rhs); +} + +static zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_bitCast_u128(zig_mul_i128(zig_bitCast_i128(lhs), zig_bitCast_i128(rhs))); +} + +zig_extern zig_u128 __udivti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) { + return __udivti3(lhs, rhs); +} + +zig_extern zig_i128 
__divti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) { + return __divti3(lhs, rhs); +} + +zig_extern zig_u128 __umodti3(zig_u128 lhs, zig_u128 rhs); +static zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) { + return __umodti3(lhs, rhs); +} + +zig_extern zig_i128 __modti3(zig_i128 lhs, zig_i128 rhs); +static zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) { + return __modti3(lhs, rhs); +} + +#endif /* zig_has_int128 */ + +#define zig_div_floor_u128 zig_div_trunc_u128 + +static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + int64_t mask = zig_or_u64((uint64_t)zig_hi_i128(rem), zig_lo_i128(rem)) != UINT64_C(0) + ? zig_shr_i64(zig_xor_i64(zig_hi_i128(lhs), zig_hi_i128(rhs)), UINT8_C(63)) : INT64_C(0); + return zig_add_i128(zig_div_trunc_i128(lhs, rhs), zig_make_i128(mask, (uint64_t)mask)); +} + +#define zig_mod_u128 zig_rem_u128 + +static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) { + zig_i128 rem = zig_rem_i128(lhs, rhs); + int64_t mask = zig_or_u64((uint64_t)zig_hi_i128(rem), zig_lo_i128(rem)) != UINT64_C(0) + ? zig_shr_i64(zig_xor_i64(zig_hi_i128(lhs), zig_hi_i128(rhs)), UINT8_C(63)) : INT64_C(0); + return zig_add_i128(rem, zig_and_i128(rhs, zig_make_i128(mask, (uint64_t)mask))); +} + +static inline zig_u128 zig_min_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_cmp_u128(lhs, rhs) < INT32_C(0) ? lhs : rhs; +} + +static inline zig_i128 zig_min_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_cmp_i128(lhs, rhs) < INT32_C(0) ? lhs : rhs; +} + +static inline zig_u128 zig_max_u128(zig_u128 lhs, zig_u128 rhs) { + return zig_cmp_u128(lhs, rhs) > INT32_C(0) ? lhs : rhs; +} + +static inline zig_i128 zig_max_i128(zig_i128 lhs, zig_i128 rhs) { + return zig_cmp_i128(lhs, rhs) > INT32_C(0) ? 
lhs : rhs; +} + +static inline zig_u128 zig_wrap_u128(zig_u128 val, uint8_t bits) { + return zig_and_u128(val, zig_maxInt_u(128, bits)); +} + +static inline zig_i128 zig_wrap_i128(zig_i128 val, uint8_t bits) { + if (bits > UINT8_C(64)) return zig_make_i128(zig_wrap_i64(zig_hi_i128(val), bits - UINT8_C(64)), zig_lo_i128(val)); + int64_t lo = zig_wrap_i64((int64_t)zig_lo_i128(val), bits); + return zig_make_i128(zig_shr_i64(lo, 63), (uint64_t)lo); +} + +static inline zig_u128 zig_shlw_u128(zig_u128 lhs, uint8_t rhs, uint8_t bits) { + return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_shlw_i128(zig_i128 lhs, uint8_t rhs, uint8_t bits) { + return zig_wrap_i128(zig_bitCast_i128(zig_shl_u128(zig_bitCast_u128(lhs), rhs)), bits); +} + +static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + return zig_wrap_u128(zig_add_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + return zig_wrap_i128(zig_bitCast_i128(zig_add_u128(zig_bitCast_u128(lhs), zig_bitCast_u128(rhs))), bits); +} + +static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + return zig_wrap_i128(zig_bitCast_i128(zig_sub_u128(zig_bitCast_u128(lhs), zig_bitCast_u128(rhs))), bits); +} + +static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits); +} + +static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + return zig_wrap_i128(zig_bitCast_i128(zig_mul_u128(zig_bitCast_u128(lhs), zig_bitCast_u128(rhs))), bits); +} + +static inline zig_u128 zig_abs_i128(zig_i128 val) { + zig_i128 tmp = zig_shr_i128(val, 127); + return zig_bitCast_u128(zig_sub_i128(zig_xor_i128(val, tmp), tmp)); +} + +#if zig_has_int128 + +static inline bool 
zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) + zig_u128 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); +#else + *res = zig_addw_u128(lhs, rhs, bits); + return *res < lhs; +#endif +} + +zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { +#if zig_has_builtin(add_overflow) + zig_i128 full_res; + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) + zig_u128 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); +#else + *res = zig_subw_u128(lhs, rhs, bits); + return *res > lhs; +#endif +} + +zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { +#if zig_has_builtin(sub_overflow) + zig_i128 full_res; + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); +} + +static inline bool zig_mulo_u128(zig_u128 *res, 
zig_u128 lhs, zig_u128 rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) + zig_u128 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + *res = zig_wrap_u128(full_res, bits); + return overflow || full_res < zig_minInt_u(128, bits) || full_res > zig_maxInt_u(128, bits); +#else + *res = zig_mulw_u128(lhs, rhs, bits); + return rhs != zig_make_u128(0, 0) && lhs > zig_maxInt_u(128, bits) / rhs; +#endif +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { +#if zig_has_builtin(mul_overflow) + zig_i128 full_res; + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); +#else + int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0; +#endif + *res = zig_wrap_i128(full_res, bits); + return overflow || full_res < zig_minInt_i(128, bits) || full_res > zig_maxInt_i(128, bits); +} + +#else /* zig_has_int128 */ + +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + uint64_t hi; + bool overflow = zig_addo_u64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_addo_u64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); +} + +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int64_t hi; + bool overflow = zig_addo_i64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_addo_i64(&res->hi, hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); +} + +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + uint64_t hi; + bool overflow = zig_subo_u64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_subo_u64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); +} + +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int64_t hi; + bool overflow = 
zig_subo_i64(&hi, lhs.hi, rhs.hi, bits - 64); + return overflow ^ zig_subo_i64(&res->hi, hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, 64), bits - 64); +} + +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + *res = zig_mulw_u128(lhs, rhs, bits); + return zig_cmp_u128(*res, zig_make_u128(0, 0)) != INT32_C(0) && + zig_cmp_u128(lhs, zig_div_trunc_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0); +} + +zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, int *overflow); +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + int overflow_int; + zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int); + bool overflow = overflow_int != 0 || + zig_cmp_i128(full_res, zig_minInt_i(128, bits)) < INT32_C(0) || + zig_cmp_i128(full_res, zig_maxInt_i(128, bits)) > INT32_C(0); + *res = zig_wrap_i128(full_res, bits); + return overflow; +} + +#endif /* zig_has_int128 */ + +static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, uint8_t rhs, uint8_t bits) { + *res = zig_shlw_u128(lhs, rhs, bits); + return zig_cmp_u128(lhs, zig_shr_u128(zig_maxInt_u(128, bits), rhs)) > INT32_C(0); +} + +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, uint8_t rhs, uint8_t bits) { + *res = zig_shlw_i128(lhs, rhs, bits); + zig_i128 mask = zig_bitCast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - UINT8_C(1))); + return zig_cmp_i128(zig_and_i128(lhs, mask), zig_make_i128(0, 0)) != INT32_C(0) && + zig_cmp_i128(zig_and_i128(lhs, mask), mask) != INT32_C(0); +} + +static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + zig_u128 res; + if (zig_cmp_u128(rhs, zig_make_u128(0, bits)) >= INT32_C(0)) + return zig_cmp_u128(lhs, zig_make_u128(0, 0)) != INT32_C(0) ? zig_maxInt_u(128, bits) : lhs; + return zig_shlo_u128(&res, lhs, (uint8_t)zig_lo_u128(rhs), bits) ? 
zig_maxInt_u(128, bits) : res; +} + +static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + zig_i128 res; + if (zig_cmp_u128(zig_bitCast_u128(rhs), zig_make_u128(0, bits)) < INT32_C(0) && !zig_shlo_i128(&res, lhs, (uint8_t)zig_lo_i128(rhs), bits)) return res; + return zig_cmp_i128(lhs, zig_make_i128(0, 0)) < INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); +} + +static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + zig_u128 res; + return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res; +} + +static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + zig_i128 res; + if (!zig_addo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); +} + +static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + zig_u128 res; + return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt_u(128, bits) : res; +} + +static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + zig_i128 res; + if (!zig_subo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(res, zig_make_i128(0, 0)) >= INT32_C(0) ? zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); +} + +static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, uint8_t bits) { + zig_u128 res; + return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt_u(128, bits) : res; +} + +static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, uint8_t bits) { + zig_i128 res; + if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res; + return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_make_i128(0, 0)) < INT32_C(0) ? 
zig_minInt_i(128, bits) : zig_maxInt_i(128, bits); +} + +static inline uint8_t zig_clz_u128(zig_u128 val, uint8_t bits) { + if (bits <= UINT8_C(64)) return zig_clz_u64(zig_lo_u128(val), bits); + if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - UINT8_C(64)); + return zig_clz_u64(zig_lo_u128(val), UINT8_C(64)) + (bits - UINT8_C(64)); +} + +static inline uint8_t zig_clz_i128(zig_i128 val, uint8_t bits) { + return zig_clz_u128(zig_bitCast_u128(val), bits); +} + +static inline uint8_t zig_ctz_u128(zig_u128 val, uint8_t bits) { + if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), UINT8_C(64)); + return zig_ctz_u64(zig_hi_u128(val), bits - UINT8_C(64)) + UINT8_C(64); +} + +static inline uint8_t zig_ctz_i128(zig_i128 val, uint8_t bits) { + return zig_ctz_u128(zig_bitCast_u128(val), bits); +} + +static inline uint8_t zig_popcount_u128(zig_u128 val, uint8_t bits) { + return zig_popcount_u64(zig_hi_u128(val), bits - UINT8_C(64)) + + zig_popcount_u64(zig_lo_u128(val), UINT8_C(64)); +} + +static inline uint8_t zig_popcount_i128(zig_i128 val, uint8_t bits) { + return zig_popcount_u128(zig_bitCast_u128(val), bits); +} + +static inline zig_u128 zig_byte_swap_u128(zig_u128 val, uint8_t bits) { + zig_u128 full_res; +#if zig_has_builtin(bswap128) + full_res = __builtin_bswap128(val); +#else + full_res = zig_make_u128(zig_byte_swap_u64(zig_lo_u128(val), UINT8_C(64)), + zig_byte_swap_u64(zig_hi_u128(val), UINT8_C(64))); +#endif + return zig_shr_u128(full_res, UINT8_C(128) - bits); +} + +static inline zig_i128 zig_byte_swap_i128(zig_i128 val, uint8_t bits) { + return zig_bitCast_i128(zig_byte_swap_u128(zig_bitCast_u128(val), bits)); +} + +static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, uint8_t bits) { + return zig_shr_u128(zig_make_u128(zig_bit_reverse_u64(zig_lo_u128(val), UINT8_C(64)), + zig_bit_reverse_u64(zig_hi_u128(val), UINT8_C(64))), + UINT8_C(128) - bits); +} + +static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, uint8_t bits) 
{ + return zig_bitCast_i128(zig_bit_reverse_u128(zig_bitCast_u128(val), bits)); +} + +/* ========================== Big Integer Support =========================== */ + +static inline uint16_t zig_int_bytes(uint16_t bits) { + uint16_t bytes = (bits + CHAR_BIT - 1) / CHAR_BIT; + uint16_t alignment = ZIG_TARGET_MAX_INT_ALIGNMENT; + while (alignment / 2 >= bytes) alignment /= 2; + return (bytes + alignment - 1) / alignment * alignment; +} + +static inline int32_t zig_cmp_big(const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + bool do_signed = is_signed; + uint16_t remaining_bytes = zig_int_bytes(bits); + +#if zig_little_endian + byte_offset = remaining_bytes; +#endif + + while (remaining_bytes >= 128 / CHAR_BIT) { + int32_t limb_cmp; + +#if zig_little_endian + byte_offset -= 128 / CHAR_BIT; +#endif + + if (do_signed) { + zig_i128 lhs_limb; + zig_i128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_cmp = zig_cmp_i128(lhs_limb, rhs_limb); + do_signed = false; + } else { + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_cmp = zig_cmp_u128(lhs_limb, rhs_limb); + } + + if (limb_cmp != 0) return limb_cmp; + remaining_bytes -= 128 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 128 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 64 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 64 / CHAR_BIT; +#endif + + if (do_signed) { + int64_t lhs_limb; + int64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + do_signed = false; + } else { + uint64_t lhs_limb; + 
uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + } + + remaining_bytes -= 64 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 64 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 32 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 32 / CHAR_BIT; +#endif + + if (do_signed) { + int32_t lhs_limb; + int32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + do_signed = false; + } else { + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + } + + remaining_bytes -= 32 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 32 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 16 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 16 / CHAR_BIT; +#endif + + if (do_signed) { + int16_t lhs_limb; + int16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + do_signed = false; + } else { + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + } + + remaining_bytes -= 16 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 16 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 8 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 8 / CHAR_BIT; 
+#endif + + if (do_signed) { + int8_t lhs_limb; + int8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + do_signed = false; + } else { + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + if (lhs_limb != rhs_limb) return (lhs_limb > rhs_limb) - (lhs_limb < rhs_limb); + } + + remaining_bytes -= 8 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 8 / CHAR_BIT; +#endif + } + + return 0; +} + +static inline void zig_and_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + (void)is_signed; + + while (remaining_bytes >= 128 / CHAR_BIT) { + zig_u128 res_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u128(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 128 / CHAR_BIT; + byte_offset += 128 / CHAR_BIT; + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint64_t res_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u64(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 64 / CHAR_BIT; + byte_offset += 64 / CHAR_BIT; + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint32_t res_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], 
sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u32(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 32 / CHAR_BIT; + byte_offset += 32 / CHAR_BIT; + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint16_t res_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u16(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 16 / CHAR_BIT; + byte_offset += 16 / CHAR_BIT; + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t res_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_and_u8(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 8 / CHAR_BIT; + byte_offset += 8 / CHAR_BIT; + } +} + +static inline void zig_or_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + (void)is_signed; + + while (remaining_bytes >= 128 / CHAR_BIT) { + zig_u128 res_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u128(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 128 / CHAR_BIT; + byte_offset += 128 / CHAR_BIT; + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint64_t res_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], 
sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u64(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 64 / CHAR_BIT; + byte_offset += 64 / CHAR_BIT; + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint32_t res_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u32(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 32 / CHAR_BIT; + byte_offset += 32 / CHAR_BIT; + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint16_t res_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u16(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 16 / CHAR_BIT; + byte_offset += 16 / CHAR_BIT; + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t res_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_or_u8(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 8 / CHAR_BIT; + byte_offset += 8 / CHAR_BIT; + } +} + +static inline void zig_xor_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + (void)is_signed; + + while (remaining_bytes >= 128 / CHAR_BIT) { + zig_u128 res_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], 
sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u128(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 128 / CHAR_BIT; + byte_offset += 128 / CHAR_BIT; + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint64_t res_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u64(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 64 / CHAR_BIT; + byte_offset += 64 / CHAR_BIT; + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint32_t res_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u32(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 32 / CHAR_BIT; + byte_offset += 32 / CHAR_BIT; + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint16_t res_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u16(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 16 / CHAR_BIT; + byte_offset += 16 / CHAR_BIT; + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t res_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + res_limb = zig_xor_u8(lhs_limb, rhs_limb); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + + remaining_bytes -= 8 / CHAR_BIT; + byte_offset += 8 / CHAR_BIT; + } +} + +static inline bool 
zig_addo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + uint8_t top_bits = (uint8_t)(remaining_bytes * 8 - bits); + bool overflow = false; + +#if zig_big_endian + byte_offset = remaining_bytes; +#endif + + while (remaining_bytes >= 128 / CHAR_BIT) { + uint8_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 128 / CHAR_BIT; +#endif + + if (remaining_bytes == 128 / CHAR_BIT && is_signed) { + zig_i128 res_limb; + zig_i128 tmp_limb; + zig_i128 lhs_limb; + zig_i128 rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + zig_u128 res_limb; + zig_u128 tmp_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 128 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 128 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint8_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? 
top_bits : 0); + +#if zig_big_endian + byte_offset -= 64 / CHAR_BIT; +#endif + + if (remaining_bytes == 64 / CHAR_BIT && is_signed) { + int64_t res_limb; + int64_t tmp_limb; + int64_t lhs_limb; + int64_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint64_t res_limb; + uint64_t tmp_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 64 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 64 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint8_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 32 / CHAR_BIT; +#endif + + if (remaining_bytes == 32 / CHAR_BIT && is_signed) { + int32_t res_limb; + int32_t tmp_limb; + int32_t lhs_limb; + int32_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_i32(&res_limb, tmp_limb, overflow ? 
INT32_C(1) : INT32_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint32_t res_limb; + uint32_t tmp_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 32 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 32 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint8_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 16 / CHAR_BIT; +#endif + + if (remaining_bytes == 16 / CHAR_BIT && is_signed) { + int16_t res_limb; + int16_t tmp_limb; + int16_t lhs_limb; + int16_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_i16(&res_limb, tmp_limb, overflow ? INT16_C(1) : INT16_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint16_t res_limb; + uint16_t tmp_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_u16(&res_limb, tmp_limb, overflow ? 
UINT16_C(1) : UINT16_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 16 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 16 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 8 / CHAR_BIT; +#endif + + if (remaining_bytes == 8 / CHAR_BIT && is_signed) { + int8_t res_limb; + int8_t tmp_limb; + int8_t lhs_limb; + int8_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint8_t res_limb; + uint8_t tmp_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_addo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_addo_u8(&res_limb, tmp_limb, overflow ? 
UINT8_C(1) : UINT8_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 8 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 8 / CHAR_BIT; +#endif + } + + return overflow; +} + +static inline bool zig_subo_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + uint8_t *res_bytes = res; + const uint8_t *lhs_bytes = lhs; + const uint8_t *rhs_bytes = rhs; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + uint8_t top_bits = (uint8_t)(remaining_bytes * 8 - bits); + bool overflow = false; + +#if zig_big_endian + byte_offset = remaining_bytes; +#endif + + while (remaining_bytes >= 128 / CHAR_BIT) { + uint8_t limb_bits = 128 - (remaining_bytes == 128 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 128 / CHAR_BIT; +#endif + + if (remaining_bytes == 128 / CHAR_BIT && is_signed) { + zig_i128 res_limb; + zig_i128 tmp_limb; + zig_i128 lhs_limb; + zig_i128 rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_i128(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_i128(&res_limb, tmp_limb, zig_make_i128(INT64_C(0), overflow ? UINT64_C(1) : UINT64_C(0)), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + zig_u128 res_limb; + zig_u128 tmp_limb; + zig_u128 lhs_limb; + zig_u128 rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_u128(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_u128(&res_limb, tmp_limb, zig_make_u128(UINT64_C(0), overflow ? 
UINT64_C(1) : UINT64_C(0)), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 128 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 128 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 64 / CHAR_BIT) { + uint8_t limb_bits = 64 - (remaining_bytes == 64 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 64 / CHAR_BIT; +#endif + + if (remaining_bytes == 64 / CHAR_BIT && is_signed) { + int64_t res_limb; + int64_t tmp_limb; + int64_t lhs_limb; + int64_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_i64(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_i64(&res_limb, tmp_limb, overflow ? INT64_C(1) : INT64_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint64_t res_limb; + uint64_t tmp_limb; + uint64_t lhs_limb; + uint64_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_u64(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_u64(&res_limb, tmp_limb, overflow ? UINT64_C(1) : UINT64_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 64 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 64 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 32 / CHAR_BIT) { + uint8_t limb_bits = 32 - (remaining_bytes == 32 / CHAR_BIT ? 
top_bits : 0); + +#if zig_big_endian + byte_offset -= 32 / CHAR_BIT; +#endif + + if (remaining_bytes == 32 / CHAR_BIT && is_signed) { + int32_t res_limb; + int32_t tmp_limb; + int32_t lhs_limb; + int32_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_i32(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_i32(&res_limb, tmp_limb, overflow ? INT32_C(1) : INT32_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint32_t res_limb; + uint32_t tmp_limb; + uint32_t lhs_limb; + uint32_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_u32(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_u32(&res_limb, tmp_limb, overflow ? UINT32_C(1) : UINT32_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 32 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 32 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 16 / CHAR_BIT) { + uint8_t limb_bits = 16 - (remaining_bytes == 16 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 16 / CHAR_BIT; +#endif + + if (remaining_bytes == 16 / CHAR_BIT && is_signed) { + int16_t res_limb; + int16_t tmp_limb; + int16_t lhs_limb; + int16_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_i16(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_i16(&res_limb, tmp_limb, overflow ? 
INT16_C(1) : INT16_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint16_t res_limb; + uint16_t tmp_limb; + uint16_t lhs_limb; + uint16_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_u16(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_u16(&res_limb, tmp_limb, overflow ? UINT16_C(1) : UINT16_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 16 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 16 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 8 / CHAR_BIT) { + uint8_t limb_bits = 8 - (remaining_bytes == 8 / CHAR_BIT ? top_bits : 0); + +#if zig_big_endian + byte_offset -= 8 / CHAR_BIT; +#endif + + if (remaining_bytes == 8 / CHAR_BIT && is_signed) { + int8_t res_limb; + int8_t tmp_limb; + int8_t lhs_limb; + int8_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_i8(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_i8(&res_limb, tmp_limb, overflow ? INT8_C(1) : INT8_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } else { + uint8_t res_limb; + uint8_t tmp_limb; + uint8_t lhs_limb; + uint8_t rhs_limb; + bool limb_overflow; + + memcpy(&lhs_limb, &lhs_bytes[byte_offset], sizeof(lhs_limb)); + memcpy(&rhs_limb, &rhs_bytes[byte_offset], sizeof(rhs_limb)); + limb_overflow = zig_subo_u8(&tmp_limb, lhs_limb, rhs_limb, limb_bits); + overflow = limb_overflow ^ zig_subo_u8(&res_limb, tmp_limb, overflow ? 
UINT8_C(1) : UINT8_C(0), limb_bits); + memcpy(&res_bytes[byte_offset], &res_limb, sizeof(res_limb)); + } + + remaining_bytes -= 8 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 8 / CHAR_BIT; +#endif + } + + return overflow; +} + +static inline void zig_addw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + (void)zig_addo_big(res, lhs, rhs, is_signed, bits); +} + +static inline void zig_subw_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + (void)zig_subo_big(res, lhs, rhs, is_signed, bits); +} + +zig_extern void __udivei4(uint32_t *res, const uint32_t *lhs, const uint32_t *rhs, uintptr_t bits); +static inline void zig_div_trunc_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + if (!is_signed) { + __udivei4(res, lhs, rhs, bits); + return; + } + + zig_trap(); +} + +static inline void zig_div_floor_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + if (!is_signed) { + zig_div_trunc_big(res, lhs, rhs, is_signed, bits); + return; + } + + zig_trap(); +} + +zig_extern void __umodei4(uint32_t *res, const uint32_t *lhs, const uint32_t *rhs, uintptr_t bits); +static inline void zig_rem_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + if (!is_signed) { + __umodei4(res, lhs, rhs, bits); + return; + } + + zig_trap(); +} + +static inline void zig_mod_big(void *res, const void *lhs, const void *rhs, bool is_signed, uint16_t bits) { + if (!is_signed) { + zig_rem_big(res, lhs, rhs, is_signed, bits); + return; + } + + zig_trap(); +} + +static inline uint16_t zig_clz_big(const void *val, bool is_signed, uint16_t bits) { + const uint8_t *val_bytes = val; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + uint16_t skip_bits = remaining_bytes * 8 - bits; + uint16_t total_lz = 0; + uint16_t limb_lz; + (void)is_signed; + +#if zig_little_endian + byte_offset = remaining_bytes; +#endif + 
+ while (remaining_bytes >= 128 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 128 / CHAR_BIT; +#endif + + { + zig_u128 val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_lz = zig_clz_u128(val_limb, 128 - skip_bits); + } + + total_lz += limb_lz; + if (limb_lz < 128 - skip_bits) return total_lz; + skip_bits = 0; + remaining_bytes -= 128 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 128 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 64 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 64 / CHAR_BIT; +#endif + + { + uint64_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_lz = zig_clz_u64(val_limb, 64 - skip_bits); + } + + total_lz += limb_lz; + if (limb_lz < 64 - skip_bits) return total_lz; + skip_bits = 0; + remaining_bytes -= 64 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 64 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 32 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 32 / CHAR_BIT; +#endif + + { + uint32_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_lz = zig_clz_u32(val_limb, 32 - skip_bits); + } + + total_lz += limb_lz; + if (limb_lz < 32 - skip_bits) return total_lz; + skip_bits = 0; + remaining_bytes -= 32 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 32 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 16 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 16 / CHAR_BIT; +#endif + + { + uint16_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_lz = zig_clz_u16(val_limb, 16 - skip_bits); + } + + total_lz += limb_lz; + if (limb_lz < 16 - skip_bits) return total_lz; + skip_bits = 0; + remaining_bytes -= 16 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 16 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 8 / CHAR_BIT) { +#if zig_little_endian + byte_offset -= 8 / CHAR_BIT; +#endif + + { + uint8_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], 
sizeof(val_limb)); + limb_lz = zig_clz_u8(val_limb, 8 - skip_bits); + } + + total_lz += limb_lz; + if (limb_lz < 8 - skip_bits) return total_lz; + skip_bits = 0; + remaining_bytes -= 8 / CHAR_BIT; + +#if zig_big_endian + byte_offset += 8 / CHAR_BIT; +#endif + } + + return total_lz; +} + +static inline uint16_t zig_ctz_big(const void *val, bool is_signed, uint16_t bits) { + const uint8_t *val_bytes = val; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + uint16_t total_tz = 0; + uint16_t limb_tz; + (void)is_signed; + +#if zig_big_endian + byte_offset = remaining_bytes; +#endif + + while (remaining_bytes >= 128 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 128 / CHAR_BIT; +#endif + + { + zig_u128 val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_tz = zig_ctz_u128(val_limb, 128); + } + + total_tz += limb_tz; + if (limb_tz < 128) return total_tz; + remaining_bytes -= 128 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 128 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 64 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 64 / CHAR_BIT; +#endif + + { + uint64_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_tz = zig_ctz_u64(val_limb, 64); + } + + total_tz += limb_tz; + if (limb_tz < 64) return total_tz; + remaining_bytes -= 64 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 64 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 32 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 32 / CHAR_BIT; +#endif + + { + uint32_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_tz = zig_ctz_u32(val_limb, 32); + } + + total_tz += limb_tz; + if (limb_tz < 32) return total_tz; + remaining_bytes -= 32 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 32 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 16 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 16 / CHAR_BIT; +#endif + + { + uint16_t val_limb; + + 
memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_tz = zig_ctz_u16(val_limb, 16); + } + + total_tz += limb_tz; + if (limb_tz < 16) return total_tz; + remaining_bytes -= 16 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 16 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 8 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 8 / CHAR_BIT; +#endif + + { + uint8_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + limb_tz = zig_ctz_u8(val_limb, 8); + } + + total_tz += limb_tz; + if (limb_tz < 8) return total_tz; + remaining_bytes -= 8 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 8 / CHAR_BIT; +#endif + } + + return total_tz; +} + +static inline uint16_t zig_popcount_big(const void *val, bool is_signed, uint16_t bits) { + const uint8_t *val_bytes = val; + uint16_t byte_offset = 0; + uint16_t remaining_bytes = zig_int_bytes(bits); + uint16_t total_pc = 0; + (void)is_signed; + +#if zig_big_endian + byte_offset = remaining_bytes; +#endif + + while (remaining_bytes >= 128 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 128 / CHAR_BIT; +#endif + + { + zig_u128 val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + total_pc += zig_popcount_u128(val_limb, 128); + } + + remaining_bytes -= 128 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 128 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 64 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 64 / CHAR_BIT; +#endif + + { + uint64_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + total_pc += zig_popcount_u64(val_limb, 64); + } + + remaining_bytes -= 64 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 64 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 32 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 32 / CHAR_BIT; +#endif + + { + uint32_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + total_pc += zig_popcount_u32(val_limb, 32); + } + + remaining_bytes 
-= 32 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 32 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 16 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 16 / CHAR_BIT; +#endif + + { + uint16_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + total_pc = zig_popcount_u16(val_limb, 16); + } + + remaining_bytes -= 16 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 16 / CHAR_BIT; +#endif + } + + while (remaining_bytes >= 8 / CHAR_BIT) { +#if zig_big_endian + byte_offset -= 8 / CHAR_BIT; +#endif + + { + uint8_t val_limb; + + memcpy(&val_limb, &val_bytes[byte_offset], sizeof(val_limb)); + total_pc = zig_popcount_u8(val_limb, 8); + } + + remaining_bytes -= 8 / CHAR_BIT; + +#if zig_little_endian + byte_offset += 8 / CHAR_BIT; +#endif + } + + return total_pc; +} + +/* ========================= Floating Point Support ========================= */ + +#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ +#define __STDC_WANT_IEC_60559_TYPES_EXT__ +#endif + +#include + +#if defined(zig_msvc) +float __cdecl nanf(char const* input); +double __cdecl nan(char const* input); +long double __cdecl nanl(char const* input); + +#define zig_msvc_flt_inf ((double)(1e+300 * 1e+300)) +#define zig_msvc_flt_inff ((float)(1e+300 * 1e+300)) +#define zig_msvc_flt_infl ((long double)(1e+300 * 1e+300)) +#define zig_msvc_flt_nan ((double)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanf ((float)(zig_msvc_flt_inf * 0.f)) +#define zig_msvc_flt_nanl ((long double)(zig_msvc_flt_inf * 0.f)) +#define __builtin_nan(str) nan(str) +#define __builtin_nanf(str) nanf(str) +#define __builtin_nanl(str) nanl(str) +#define __builtin_inf() zig_msvc_flt_inf +#define __builtin_inff() zig_msvc_flt_inff +#define __builtin_infl() zig_msvc_flt_infl +#endif + +#if (zig_has_builtin(nan) && zig_has_builtin(nans) && zig_has_builtin(inf)) || defined(zig_gcc) +#define zig_make_special_f16(sign, name, arg, repr) sign zig_make_f16 (__builtin_##name, )(arg) +#define 
zig_make_special_f32(sign, name, arg, repr) sign zig_make_f32 (__builtin_##name, )(arg) +#define zig_make_special_f64(sign, name, arg, repr) sign zig_make_f64 (__builtin_##name, )(arg) +#define zig_make_special_f80(sign, name, arg, repr) sign zig_make_f80 (__builtin_##name, )(arg) +#define zig_make_special_f128(sign, name, arg, repr) sign zig_make_f128(__builtin_##name, )(arg) +#else +#define zig_make_special_f16(sign, name, arg, repr) zig_bitCast_f16 (repr) +#define zig_make_special_f32(sign, name, arg, repr) zig_bitCast_f32 (repr) +#define zig_make_special_f64(sign, name, arg, repr) zig_bitCast_f64 (repr) +#define zig_make_special_f80(sign, name, arg, repr) zig_bitCast_f80 (repr) +#define zig_make_special_f128(sign, name, arg, repr) zig_bitCast_f128(repr) +#endif + +#define zig_has_f16 1 +#define zig_libc_name_f16(name) __##name##h +#define zig_init_special_f16(sign, name, arg, repr) zig_make_special_f16(sign, name, arg, repr) +#if FLT_MANT_DIG == 11 +typedef float zig_f16; +#define zig_make_f16(fp, repr) fp##f +#elif DBL_MANT_DIG == 11 +typedef double zig_f16; +#define zig_make_f16(fp, repr) fp +#elif LDBL_MANT_DIG == 11 +typedef long double zig_f16; +#define zig_make_f16(fp, repr) fp##l +#elif FLT16_MANT_DIG == 11 && (zig_has_builtin(inff16) || defined(zig_gcc)) +typedef _Float16 zig_f16; +#define zig_make_f16(fp, repr) fp##f16 +#elif defined(__SIZEOF_FP16__) +typedef __fp16 zig_f16; +#define zig_make_f16(fp, repr) fp##f16 +#else +#undef zig_has_f16 +#define zig_has_f16 0 +#define zig_repr_f16 u16 +typedef uint16_t zig_f16; +#define zig_make_f16(fp, repr) repr +#undef zig_make_special_f16 +#define zig_make_special_f16(sign, name, arg, repr) repr +#undef zig_init_special_f16 +#define zig_init_special_f16(sign, name, arg, repr) repr +#endif +#if defined(zig_darwin) && defined(zig_x86) +typedef uint16_t zig_compiler_rt_f16; +#else +typedef zig_f16 zig_compiler_rt_f16; +#endif + +#define zig_has_f32 1 +#define zig_libc_name_f32(name) name##f +#if defined(zig_msvc) 
+#define zig_init_special_f32(sign, name, arg, repr) sign zig_make_f32(zig_msvc_flt_##name, ) +#else +#define zig_init_special_f32(sign, name, arg, repr) zig_make_special_f32(sign, name, arg, repr) +#endif +#if FLT_MANT_DIG == 24 +typedef float zig_f32; +#define zig_make_f32(fp, repr) fp##f +#elif DBL_MANT_DIG == 24 +typedef double zig_f32; +#define zig_make_f32(fp, repr) fp +#elif LDBL_MANT_DIG == 24 +typedef long double zig_f32; +#define zig_make_f32(fp, repr) fp##l +#elif FLT32_MANT_DIG == 24 +typedef _Float32 zig_f32; +#define zig_make_f32(fp, repr) fp##f32 +#else +#undef zig_has_f32 +#define zig_has_f32 0 +#define zig_repr_f32 u32 +typedef uint32_t zig_f32; +#define zig_make_f32(fp, repr) repr +#undef zig_make_special_f32 +#define zig_make_special_f32(sign, name, arg, repr) repr +#undef zig_init_special_f32 +#define zig_init_special_f32(sign, name, arg, repr) repr +#endif + +#define zig_has_f64 1 +#define zig_libc_name_f64(name) name + +#if defined(zig_msvc) +#define zig_init_special_f64(sign, name, arg, repr) sign zig_make_f64(zig_msvc_flt_##name, ) +#else +#define zig_init_special_f64(sign, name, arg, repr) zig_make_special_f64(sign, name, arg, repr) +#endif +#if FLT_MANT_DIG == 53 +typedef float zig_f64; +#define zig_make_f64(fp, repr) fp##f +#elif DBL_MANT_DIG == 53 +typedef double zig_f64; +#define zig_make_f64(fp, repr) fp +#elif LDBL_MANT_DIG == 53 +typedef long double zig_f64; +#define zig_make_f64(fp, repr) fp##l +#elif FLT64_MANT_DIG == 53 +typedef _Float64 zig_f64; +#define zig_make_f64(fp, repr) fp##f64 +#elif FLT32X_MANT_DIG == 53 +typedef _Float32x zig_f64; +#define zig_make_f64(fp, repr) fp##f32x +#else +#undef zig_has_f64 +#define zig_has_f64 0 +#define zig_repr_f64 u64 +typedef uint64_t zig_f64; +#define zig_make_f64(fp, repr) repr +#undef zig_make_special_f64 +#define zig_make_special_f64(sign, name, arg, repr) repr +#undef zig_init_special_f64 +#define zig_init_special_f64(sign, name, arg, repr) repr +#endif + +#define zig_has_f80 1 +#define 
zig_libc_name_f80(name) __##name##x +#define zig_init_special_f80(sign, name, arg, repr) zig_make_special_f80(sign, name, arg, repr) +#if FLT_MANT_DIG == 64 +typedef float zig_f80; +#define zig_make_f80(fp, repr) fp##f +#elif DBL_MANT_DIG == 64 +typedef double zig_f80; +#define zig_make_f80(fp, repr) fp +#elif LDBL_MANT_DIG == 64 +typedef long double zig_f80; +#define zig_make_f80(fp, repr) fp##l +#elif FLT80_MANT_DIG == 64 +typedef _Float80 zig_f80; +#define zig_make_f80(fp, repr) fp##f80 +#elif FLT64X_MANT_DIG == 64 +typedef _Float64x zig_f80; +#define zig_make_f80(fp, repr) fp##f64x +#elif defined(__SIZEOF_FLOAT80__) +typedef __float80 zig_f80; +#define zig_make_f80(fp, repr) fp##l +#else +#undef zig_has_f80 +#define zig_has_f80 0 +#define zig_repr_f80 u128 +typedef zig_u128 zig_f80; +#define zig_make_f80(fp, repr) repr +#undef zig_make_special_f80 +#define zig_make_special_f80(sign, name, arg, repr) repr +#undef zig_init_special_f80 +#define zig_init_special_f80(sign, name, arg, repr) repr +#endif + +#if defined(zig_gcc) && defined(zig_x86) +#define zig_f128_has_miscompilations 1 +#else +#define zig_f128_has_miscompilations 0 +#endif + +#define zig_has_f128 1 +#define zig_libc_name_f128(name) name##q +#define zig_init_special_f128(sign, name, arg, repr) zig_make_special_f128(sign, name, arg, repr) +#if !zig_f128_has_miscompilations && FLT_MANT_DIG == 113 +typedef float zig_f128; +#define zig_make_f128(fp, repr) fp##f +#elif !zig_f128_has_miscompilations && DBL_MANT_DIG == 113 +typedef double zig_f128; +#define zig_make_f128(fp, repr) fp +#elif !zig_f128_has_miscompilations && LDBL_MANT_DIG == 113 +typedef long double zig_f128; +#define zig_make_f128(fp, repr) fp##l +#elif !zig_f128_has_miscompilations && FLT128_MANT_DIG == 113 +typedef _Float128 zig_f128; +#define zig_make_f128(fp, repr) fp##f128 +#elif !zig_f128_has_miscompilations && FLT64X_MANT_DIG == 113 +typedef _Float64x zig_f128; +#define zig_make_f128(fp, repr) fp##f64x +#elif 
!zig_f128_has_miscompilations && defined(__SIZEOF_FLOAT128__) +typedef __float128 zig_f128; +#define zig_make_f128(fp, repr) fp##q +#undef zig_make_special_f128 +#define zig_make_special_f128(sign, name, arg, repr) sign __builtin_##name##f128(arg) +#else +#undef zig_has_f128 +#define zig_has_f128 0 +#undef zig_make_special_f128 +#undef zig_init_special_f128 +#if defined(zig_darwin) || defined(zig_aarch64) +typedef __attribute__((__vector_size__(2 * sizeof(uint64_t)))) uint64_t zig_v2u64; +zig_basic_operator(zig_v2u64, xor_v2u64, ^) +#define zig_repr_f128 v2u64 +typedef zig_v2u64 zig_f128; +#define zig_make_f128_zig_make_u128(hi, lo) (zig_f128){ lo, hi } +#define zig_make_f128_zig_init_u128 zig_make_f128_zig_make_u128 +#define zig_make_f128(fp, repr) zig_make_f128_##repr +#define zig_make_special_f128(sign, name, arg, repr) zig_make_f128_##repr +#define zig_init_special_f128(sign, name, arg, repr) zig_make_f128_##repr +#else +#define zig_repr_f128 u128 +typedef zig_u128 zig_f128; +#define zig_make_f128(fp, repr) repr +#define zig_make_special_f128(sign, name, arg, repr) repr +#define zig_init_special_f128(sign, name, arg, repr) repr +#endif +#endif + +#if !defined(zig_msvc) && defined(ZIG_TARGET_ABI_MSVC) +/* Emulate msvc abi on a gnu compiler */ +typedef zig_f64 zig_c_longdouble; +#elif defined(zig_msvc) && !defined(ZIG_TARGET_ABI_MSVC) +/* Emulate gnu abi on an msvc compiler */ +typedef zig_f128 zig_c_longdouble; +#else +/* Target and compiler abi match */ +typedef long double zig_c_longdouble; +#endif + +#define zig_bitCast_float(Type, ReprType) \ + static inline zig_##Type zig_bitCast_##Type(ReprType repr) { \ + zig_##Type result; \ + memcpy(&result, &repr, sizeof(result)); \ + return result; \ + } +zig_bitCast_float(f16, uint16_t) +zig_bitCast_float(f32, uint32_t) +zig_bitCast_float(f64, uint64_t) +zig_bitCast_float(f80, zig_u128) +zig_bitCast_float(f128, zig_u128) + +#define zig_convert_builtin(ExternResType, ResType, operation, ExternArgType, ArgType, 
version) \ + zig_extern ExternResType zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(ExternArgType); \ + static inline ResType zig_expand_concat(zig_expand_concat(zig_##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType)(ArgType arg) { \ + ResType res; \ + ExternResType extern_res; \ + ExternArgType extern_arg; \ + memcpy(&extern_arg, &arg, sizeof(extern_arg)); \ + extern_res = zig_expand_concat(zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_##ArgType), zig_compiler_rt_abbrev_##ResType), version)(extern_arg); \ + memcpy(&res, &extern_res, sizeof(res)); \ + return extern_res; \ + } +zig_convert_builtin(zig_compiler_rt_f16, zig_f16, trunc, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_compiler_rt_f16, zig_f16, trunc, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f16, zig_f16, trunc, zig_f80, zig_f80, 2) +zig_convert_builtin(zig_f16, zig_f16, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f32, zig_f32, extend, zig_compiler_rt_f16, zig_f16, 2) +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f80, zig_f80, 2) +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f64, zig_f64, extend, zig_compiler_rt_f16, zig_f16, 2) +zig_convert_builtin(zig_f64, zig_f64, trunc, zig_f80, zig_f80, 2) +zig_convert_builtin(zig_f64, zig_f64, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f80, zig_f80, extend, zig_f16, zig_f16, 2) +zig_convert_builtin(zig_f80, zig_f80, extend, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_f80, zig_f80, extend, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f80, zig_f80, trunc, zig_f128, zig_f128, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f16, zig_f16, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f32, zig_f32, 2) +zig_convert_builtin(zig_f128, zig_f128, extend, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f128, 
zig_f128, extend, zig_f80, zig_f80, 2) + +#ifdef __ARM_EABI__ + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_d2f(zig_f64); +static inline zig_f32 zig_truncdfsf(zig_f64 arg) { return __aeabi_d2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_f2d(zig_f32); +static inline zig_f64 zig_extendsfdf(zig_f32 arg) { return __aeabi_f2d(arg); } + +#else /* __ARM_EABI__ */ + +zig_convert_builtin(zig_f32, zig_f32, trunc, zig_f64, zig_f64, 2) +zig_convert_builtin(zig_f64, zig_f64, extend, zig_f32, zig_f32, 2) + +#endif /* __ARM_EABI__ */ + +#define zig_float_negate_builtin_0(w, c, sb) \ + zig_expand_concat(zig_xor_, zig_repr_f##w)(arg, zig_make_f##w(-0x0.0p0, c sb)) +#define zig_float_negate_builtin_1(w, c, sb) -arg +#define zig_float_negate_builtin(w, c, sb) \ + static inline zig_f##w zig_neg_f##w(zig_f##w arg) { \ + return zig_expand_concat(zig_float_negate_builtin_, zig_has_f##w)(w, c, sb); \ + } +zig_float_negate_builtin(16, , UINT16_C(1) << 15 ) +zig_float_negate_builtin(32, , UINT32_C(1) << 31 ) +zig_float_negate_builtin(64, , UINT64_C(1) << 63 ) +zig_float_negate_builtin(80, zig_make_u128, (UINT64_C(1) << 15, UINT64_C(0))) +zig_float_negate_builtin(128, zig_make_u128, (UINT64_C(1) << 63, UINT64_C(0))) + +#define zig_float_less_builtin_0(Type, operation) \ + zig_extern int32_t zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_zig_##Type), 2)(zig_##Type, zig_##Type); \ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 2)(lhs, rhs); \ + } +#define zig_float_less_builtin_1(Type, operation) \ + static inline int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return (!(lhs <= rhs) - (lhs < rhs)); \ + } + +#define zig_float_greater_builtin_0(Type, operation) \ + zig_float_less_builtin_0(Type, operation) +#define zig_float_greater_builtin_1(Type, operation) \ + static inline 
int32_t zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return ((lhs > rhs) - !(lhs >= rhs)); \ + } + +#define zig_float_binary_builtin_0(Type, operation, operator) \ + zig_extern zig_##Type zig_expand_concat(zig_expand_concat(__##operation, \ + zig_compiler_rt_abbrev_zig_##Type), 3)(zig_##Type, zig_##Type); \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return zig_expand_concat(zig_expand_concat(__##operation, zig_compiler_rt_abbrev_zig_##Type), 3)(lhs, rhs); \ + } +#define zig_float_binary_builtin_1(Type, operation, operator) \ + static inline zig_##Type zig_##operation##_##Type(zig_##Type lhs, zig_##Type rhs) { \ + return lhs operator rhs; \ + } + +#define zig_common_float_builtins(w) \ + zig_convert_builtin( int64_t, int64_t, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_i128, zig_i128, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_u128, zig_u128, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, int64_t, int64_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, zig_i128, zig_i128, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, zig_u128, zig_u128, ) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, cmp) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, ne) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, eq) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, lt) \ + zig_expand_concat(zig_float_less_builtin_, zig_has_f##w)(f##w, le) \ + zig_expand_concat(zig_float_greater_builtin_, zig_has_f##w)(f##w, gt) \ + zig_expand_concat(zig_float_greater_builtin_, zig_has_f##w)(f##w, ge) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, add, +) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, sub, -) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, mul, *) \ + zig_expand_concat(zig_float_binary_builtin_, zig_has_f##w)(f##w, div, 
/) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(sqrt)))(zig_f##w, zig_sqrt_f##w, zig_libc_name_f##w(sqrt), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(sin)))(zig_f##w, zig_sin_f##w, zig_libc_name_f##w(sin), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(cos)))(zig_f##w, zig_cos_f##w, zig_libc_name_f##w(cos), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(tan)))(zig_f##w, zig_tan_f##w, zig_libc_name_f##w(tan), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(exp)))(zig_f##w, zig_exp_f##w, zig_libc_name_f##w(exp), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(exp2)))(zig_f##w, zig_exp2_f##w, zig_libc_name_f##w(exp2), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log)))(zig_f##w, zig_log_f##w, zig_libc_name_f##w(log), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log2)))(zig_f##w, zig_log2_f##w, zig_libc_name_f##w(log2), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(log10)))(zig_f##w, zig_log10_f##w, zig_libc_name_f##w(log10), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fabs)))(zig_f##w, zig_abs_f##w, zig_libc_name_f##w(fabs), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(floor)))(zig_f##w, zig_floor_f##w, zig_libc_name_f##w(floor), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(ceil)))(zig_f##w, zig_ceil_f##w, zig_libc_name_f##w(ceil), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, 
zig_expand_has_builtin(zig_libc_name_f##w(round)))(zig_f##w, zig_round_f##w, zig_libc_name_f##w(round), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(trunc)))(zig_f##w, zig_trunc_f##w, zig_libc_name_f##w(trunc), (zig_f##w x), (x)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmod)))(zig_f##w, zig_fmod_f##w, zig_libc_name_f##w(fmod), (zig_f##w x, zig_f##w y), (x, y)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmin)))(zig_f##w, zig_min_f##w, zig_libc_name_f##w(fmin), (zig_f##w x, zig_f##w y), (x, y)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fmax)))(zig_f##w, zig_max_f##w, zig_libc_name_f##w(fmax), (zig_f##w x, zig_f##w y), (x, y)) \ + zig_expand_concat(zig_expand_import_, zig_expand_has_builtin(zig_libc_name_f##w(fma)))(zig_f##w, zig_fma_f##w, zig_libc_name_f##w(fma), (zig_f##w x, zig_f##w y, zig_f##w z), (x, y, z)) \ +\ + static inline zig_f##w zig_div_trunc_f##w(zig_f##w lhs, zig_f##w rhs) { \ + return zig_trunc_f##w(zig_div_f##w(lhs, rhs)); \ + } \ +\ + static inline zig_f##w zig_div_floor_f##w(zig_f##w lhs, zig_f##w rhs) { \ + return zig_floor_f##w(zig_div_f##w(lhs, rhs)); \ + } \ +\ + static inline zig_f##w zig_mod_f##w(zig_f##w lhs, zig_f##w rhs) { \ + return zig_sub_f##w(lhs, zig_mul_f##w(zig_div_floor_f##w(lhs, rhs), rhs)); \ + } +zig_common_float_builtins(16) +zig_common_float_builtins(32) +zig_common_float_builtins(64) +zig_common_float_builtins(80) +zig_common_float_builtins(128) + +#define zig_float_builtins(w) \ + zig_convert_builtin( int32_t, int32_t, fix, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(uint32_t, uint32_t, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(uint64_t, uint64_t, fixuns, zig_f##w, zig_f##w, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, float, int32_t, int32_t, ) \ + zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint32_t, uint32_t, ) \ + 
zig_convert_builtin(zig_f##w, zig_f##w, floatun, uint64_t, uint64_t, ) +zig_float_builtins(16) +zig_float_builtins(80) +zig_float_builtins(128) + +#ifdef __ARM_EABI__ + +zig_extern zig_callconv(pcs("aapcs")) int32_t __aeabi_f2iz(zig_f32); +static inline int32_t zig_fixsfsi(zig_f32 arg) { return __aeabi_f2iz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint32_t __aeabi_f2uiz(zig_f32); +static inline uint32_t zig_fixunssfsi(zig_f32 arg) { return __aeabi_f2uiz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint64_t __aeabi_f2ulz(zig_f32); +static inline uint64_t zig_fixunssfdi(zig_f32 arg) { return __aeabi_f2ulz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_i2f(int32_t); +static inline zig_f32 zig_floatsisf(int32_t arg) { return __aeabi_i2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_ui2f(uint32_t); +static inline zig_f32 zig_floatunsisf(uint32_t arg) { return __aeabi_ui2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f32 __aeabi_ul2f(uint64_t); +static inline zig_f32 zig_floatundisf(uint64_t arg) { return __aeabi_ul2f(arg); } + +zig_extern zig_callconv(pcs("aapcs")) int32_t __aeabi_d2iz(zig_f64); +static inline int32_t zig_fixdfsi(zig_f64 arg) { return __aeabi_d2iz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint32_t __aeabi_d2uiz(zig_f64); +static inline uint32_t zig_fixunsdfsi(zig_f64 arg) { return __aeabi_d2uiz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) uint64_t __aeabi_d2ulz(zig_f64); +static inline uint64_t zig_fixunsdfdi(zig_f64 arg) { return __aeabi_d2ulz(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_i2d(int32_t); +static inline zig_f64 zig_floatsidf(int32_t arg) { return __aeabi_i2d(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_ui2d(uint32_t); +static inline zig_f64 zig_floatunsidf(uint32_t arg) { return __aeabi_ui2d(arg); } + +zig_extern zig_callconv(pcs("aapcs")) zig_f64 __aeabi_ul2d(uint64_t); +static inline zig_f64 zig_floatundidf(uint64_t arg) { return 
__aeabi_ul2d(arg); } + +#else /* __ARM_EABI__ */ + +zig_float_builtins(32) +zig_float_builtins(64) + +#endif /* __ARM_EABI__ */ + +/* ============================ Atomics Support ============================= */ + +/* Note that atomics should be implemented as macros because most + compilers silently discard runtime atomic order information. */ + +/* Define fallback implementations first that can later be undef'd on compilers with builtin support. */ +/* Note that zig_atomicrmw_expected is needed to handle aliasing between res and arg. */ +#define zig_atomicrmw_xchg_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, arg, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_add_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_add_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_sub_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_sub_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_min_float(res, obj, arg, order, Type, 
ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_min_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_max_float(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_max_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) + +#define zig_atomicrmw_xchg_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, arg, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_add_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_add_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_sub_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ 
+ zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_sub_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_and_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_and_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_nand_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_not_##Type(zig_and_##Type(zig_atomicrmw_expected, arg), 128); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_or_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_or_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_xor_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type 
zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_xor_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_min_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_min_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) +#define zig_atomicrmw_max_int128(res, obj, arg, order, Type, ReprType) do { \ + zig_##Type zig_atomicrmw_expected; \ + zig_##Type zig_atomicrmw_desired; \ + zig_atomic_load(zig_atomicrmw_expected, obj, zig_memory_order_relaxed, Type, ReprType); \ + do { \ + zig_atomicrmw_desired = zig_max_##Type(zig_atomicrmw_expected, arg); \ + } while (!zig_cmpxchg_weak(obj, zig_atomicrmw_expected, zig_atomicrmw_desired, order, zig_memory_order_relaxed, Type, ReprType)); \ + res = zig_atomicrmw_expected; \ +} while (0) + +#if (__STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)) || (zig_has_include() && !defined(zig_msvc)) +#define zig_c11_atomics +#endif + +#if defined(zig_c11_atomics) +#include +typedef enum memory_order zig_memory_order; +#define zig_memory_order_relaxed memory_order_relaxed +#define zig_memory_order_acquire memory_order_acquire +#define zig_memory_order_release memory_order_release +#define zig_memory_order_acq_rel memory_order_acq_rel +#define zig_memory_order_seq_cst memory_order_seq_cst +#define zig_atomic(Type) _Atomic(Type) +#define 
zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail) +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) res = atomic_exchange_explicit (obj, arg, order) +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) res = atomic_fetch_add_explicit (obj, arg, order) +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) res = atomic_fetch_sub_explicit (obj, arg, order) +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) res = atomic_fetch_or_explicit (obj, arg, order) +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) res = atomic_fetch_xor_explicit (obj, arg, order) +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) res = atomic_fetch_and_explicit (obj, arg, order) +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store( obj, arg, order, Type, ReprType) atomic_store_explicit (obj, arg, order) +#define zig_atomic_load(res, obj, order, Type, ReprType) res = atomic_load_explicit (obj, order) +#undef zig_atomicrmw_xchg_float +#define zig_atomicrmw_xchg_float zig_atomicrmw_xchg +#undef zig_atomicrmw_add_float +#define zig_atomicrmw_add_float zig_atomicrmw_add +#undef zig_atomicrmw_sub_float +#define zig_atomicrmw_sub_float zig_atomicrmw_sub +#elif defined(zig_gnuc) +typedef int zig_memory_order; +#define zig_memory_order_relaxed __ATOMIC_RELAXED +#define zig_memory_order_acquire __ATOMIC_ACQUIRE +#define zig_memory_order_release __ATOMIC_RELEASE +#define 
zig_memory_order_acq_rel __ATOMIC_ACQ_REL +#define zig_memory_order_seq_cst __ATOMIC_SEQ_CST +#define zig_atomic(Type) Type +#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) __atomic_compare_exchange(obj, (ReprType *)&(expected), (ReprType *)&(desired), false, succ, fail) +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) __atomic_compare_exchange(obj, (ReprType *)&(expected), (ReprType *)&(desired), true, succ, fail) +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) __atomic_exchange(obj, (ReprType *)&(arg), &(res), order) +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_add (obj, arg, order) +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_sub (obj, arg, order) +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_or (obj, arg, order) +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_xor (obj, arg, order) +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_and (obj, arg, order) +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store( obj, arg, order, Type, ReprType) __atomic_store (obj, (ReprType *)&(arg), order) +#define zig_atomic_load(res, obj, order, Type, ReprType) __atomic_load (obj, &(res), order) +#undef zig_atomicrmw_xchg_float +#define zig_atomicrmw_xchg_float zig_atomicrmw_xchg +#elif defined(zig_msvc) && defined(zig_x86) +#define zig_memory_order_relaxed 0 +#define zig_memory_order_acquire 2 +#define zig_memory_order_release 3 +#define zig_memory_order_acq_rel 4 +#define zig_memory_order_seq_cst 5 +#define 
zig_atomic(Type) Type +#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) zig_msvc_cmpxchg_##Type(obj, &(expected), desired) +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) zig_cmpxchg_strong(obj, expected, desired, succ, fail, Type, ReprType) +#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_xchg_##Type(obj, arg) +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_add_ ##Type(obj, arg) +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_sub_ ##Type(obj, arg) +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_or_ ##Type(obj, arg) +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_xor_ ##Type(obj, arg) +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_and_ ##Type(obj, arg) +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_nand_##Type(obj, arg) +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_min_ ##Type(obj, arg) +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_max_ ##Type(obj, arg) +#define zig_atomic_store( obj, arg, order, Type, ReprType) zig_msvc_atomic_store_ ##Type(obj, arg) +#define zig_atomic_load(res, obj, order, Type, ReprType) res = zig_msvc_atomic_load_ ##order##_##Type(obj) +/* TODO: zig_msvc && (zig_thumb || zig_aarch64) */ +#else +#define zig_memory_order_relaxed 0 +#define zig_memory_order_acquire 2 +#define zig_memory_order_release 3 +#define zig_memory_order_acq_rel 4 +#define zig_memory_order_seq_cst 5 +#define zig_atomic(Type) Type +#define zig_cmpxchg_strong( obj, expected, desired, succ, fail, Type, ReprType) zig_atomics_unavailable +#define zig_cmpxchg_weak( obj, expected, desired, succ, fail, Type, ReprType) zig_atomics_unavailable 
+#define zig_atomicrmw_xchg(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_add(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_sub(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_or(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_xor(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_and(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_nand(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomic_store( obj, arg, order, Type, ReprType) zig_atomics_unavailable +#define zig_atomic_load(res, obj, order, Type, ReprType) zig_atomics_unavailable +#endif + +#if !defined(zig_c11_atomics) && defined(zig_msvc) && defined(zig_x86) + +/* TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 */ + +#define zig_msvc_atomics(ZigType, Type, SigType, suffix, iso_suffix) \ + static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \ + Type comparand = *expected; \ + Type initial = _InterlockedCompareExchange##suffix((SigType volatile*)obj, (SigType)desired, (SigType)comparand); \ + bool exchanged = initial == comparand; \ + if (!exchanged) { \ + *expected = initial; \ + } \ + return exchanged; \ + } \ + static inline Type zig_msvc_atomicrmw_xchg_##ZigType(Type volatile* obj, Type value) { \ + return _InterlockedExchange##suffix((SigType volatile*)obj, (SigType)value); \ + } \ + static inline Type zig_msvc_atomicrmw_add_##ZigType(Type volatile* obj, Type value) { \ + return _InterlockedExchangeAdd##suffix((SigType volatile*)obj, (SigType)value); \ + } \ + static inline 
Type zig_msvc_atomicrmw_sub_##ZigType(Type volatile* obj, Type value) { \ + bool success = false; \ + Type new; \ + Type prev; \ + while (!success) { \ + prev = *obj; \ + new = prev - value; \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline Type zig_msvc_atomicrmw_or_##ZigType(Type volatile* obj, Type value) { \ + return _InterlockedOr##suffix((SigType volatile*)obj, (SigType)value); \ + } \ + static inline Type zig_msvc_atomicrmw_xor_##ZigType(Type volatile* obj, Type value) { \ + return _InterlockedXor##suffix((SigType volatile*)obj, (SigType)value); \ + } \ + static inline Type zig_msvc_atomicrmw_and_##ZigType(Type volatile* obj, Type value) { \ + return _InterlockedAnd##suffix((SigType volatile*)obj, (SigType)value); \ + } \ + static inline Type zig_msvc_atomicrmw_nand_##ZigType(Type volatile* obj, Type value) { \ + bool success = false; \ + Type new; \ + Type prev; \ + while (!success) { \ + prev = *obj; \ + new = ~(prev & value); \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline Type zig_msvc_atomicrmw_min_##ZigType(Type volatile* obj, Type value) { \ + bool success = false; \ + Type new; \ + Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value < prev ? value : prev; \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline Type zig_msvc_atomicrmw_max_##ZigType(Type volatile* obj, Type value) { \ + bool success = false; \ + Type new; \ + Type prev; \ + while (!success) { \ + prev = *obj; \ + new = value > prev ? 
value : prev; \ + success = zig_msvc_cmpxchg_##ZigType(obj, &prev, new); \ + } \ + return prev; \ + } \ + static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \ + (void)_InterlockedExchange##suffix((SigType volatile*)obj, (SigType)value); \ + } \ + static inline Type zig_msvc_atomic_load_zig_memory_order_relaxed_##ZigType(Type volatile* obj) { \ + return __iso_volatile_load##iso_suffix((SigType volatile*)obj); \ + } \ + static inline Type zig_msvc_atomic_load_zig_memory_order_acquire_##ZigType(Type volatile* obj) { \ + Type val = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \ + _ReadWriteBarrier(); \ + return val; \ + } \ + static inline Type zig_msvc_atomic_load_zig_memory_order_seq_cst_##ZigType(Type volatile* obj) { \ + Type val = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \ + _ReadWriteBarrier(); \ + return val; \ + } + +zig_msvc_atomics( u8, uint8_t, char, 8, 8) +zig_msvc_atomics( i8, int8_t, char, 8, 8) +zig_msvc_atomics(u16, uint16_t, short, 16, 16) +zig_msvc_atomics(i16, int16_t, short, 16, 16) +zig_msvc_atomics(u32, uint32_t, long, , 32) +zig_msvc_atomics(i32, int32_t, long, , 32) + +#if defined(zig_x86_64) +zig_msvc_atomics(u64, uint64_t, __int64, 64, 64) +zig_msvc_atomics(i64, int64_t, __int64, 64, 64) +#endif + +#define zig_msvc_flt_atomics(Type, SigType, suffix, iso_suffix) \ + static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \ + SigType exchange; \ + SigType comparand; \ + SigType initial; \ + bool success; \ + memcpy(&comparand, expected, sizeof(comparand)); \ + memcpy(&exchange, &desired, sizeof(exchange)); \ + initial = _InterlockedCompareExchange##suffix((SigType volatile*)obj, exchange, comparand); \ + success = initial == comparand; \ + if (!success) memcpy(expected, &initial, sizeof(*expected)); \ + return success; \ + } \ + static inline void zig_msvc_atomic_store_##Type(zig_##Type volatile* obj, zig_##Type arg) { \ + 
SigType value; \ + memcpy(&value, &arg, sizeof(value)); \ + (void)_InterlockedExchange##suffix((SigType volatile*)obj, value); \ + } \ + static inline zig_##Type zig_msvc_atomic_load_zig_memory_order_relaxed_##Type(zig_##Type volatile* obj) { \ + zig_##Type result; \ + SigType initial = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \ + memcpy(&result, &initial, sizeof(result)); \ + return result; \ + } \ + static inline zig_##Type zig_msvc_atomic_load_zig_memory_order_acquire_##Type(zig_##Type volatile* obj) { \ + zig_##Type result; \ + SigType initial = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \ + _ReadWriteBarrier(); \ + memcpy(&result, &initial, sizeof(result)); \ + return result; \ + } \ + static inline zig_##Type zig_msvc_atomic_load_zig_memory_order_seq_cst_##Type(zig_##Type volatile* obj) { \ + zig_##Type result; \ + SigType initial = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \ + _ReadWriteBarrier(); \ + memcpy(&result, &initial, sizeof(result)); \ + return result; \ + } + +zig_msvc_flt_atomics(f32, long, , 32) +#if defined(zig_x86_64) +zig_msvc_flt_atomics(f64, int64_t, 64, 64) +#endif + +#if defined(zig_x86_32) +static inline void zig_msvc_atomic_barrier() { + int32_t barrier; + __asm { + xchg barrier, eax + } +} + +static inline void* zig_msvc_atomicrmw_xchg_p32(void volatile* obj, void* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p32(void volatile* obj, void* arg) { + (void)_InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_zig_memory_order_relaxed_p32(void volatile* obj) { + return (void*)__iso_volatile_load32(obj); +} + +static inline void* zig_msvc_atomic_load_zig_memory_order_acquire_p32(void volatile* obj) { + void* val = (void*)__iso_volatile_load32(obj); + _ReadWriteBarrier(); + return val; +} + +static inline void* zig_msvc_atomic_load_zig_memory_order_seq_cst_p32(void volatile* obj) { + return 
zig_msvc_atomic_load_zig_memory_order_acquire_p32(obj); +} + +static inline bool zig_msvc_cmpxchg_p32(void volatile* obj, void* expected, void* desired) { + void* comparand = *(void**)expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool success = initial == comparand; + if (!success) *(void**)expected = initial; + return success; +} +#else /* zig_x86_32 */ +static inline void* zig_msvc_atomicrmw_xchg_p64(void volatile* obj, void* arg) { + return _InterlockedExchangePointer(obj, arg); +} + +static inline void zig_msvc_atomic_store_p64(void volatile* obj, void* arg) { + (void)_InterlockedExchangePointer(obj, arg); +} + +static inline void* zig_msvc_atomic_load_zig_memory_order_relaxed_p64(void volatile* obj) { + return (void*)__iso_volatile_load64(obj); +} + +static inline void* zig_msvc_atomic_load_zig_memory_order_acquire_p64(void volatile* obj) { + void* val = (void*)__iso_volatile_load64(obj); + _ReadWriteBarrier(); + return val; +} + +static inline void* zig_msvc_atomic_load_zig_memory_order_seq_cst_p64(void volatile* obj) { + return zig_msvc_atomic_load_zig_memory_order_acquire_p64(obj); +} + +static inline bool zig_msvc_cmpxchg_p64(void volatile* obj, void* expected, void* desired) { + void* comparand = *(void**)expected; + void* initial = _InterlockedCompareExchangePointer(obj, desired, comparand); + bool success = initial == comparand; + if (!success) *(void**)expected = initial; + return success; +} + +static inline bool zig_msvc_cmpxchg_u128(zig_u128 volatile* obj, zig_u128* expected, zig_u128 desired) { + return _InterlockedCompareExchange128((__int64 volatile*)obj, (__int64)zig_hi_u128(desired), (__int64)zig_lo_u128(desired), (__int64*)expected); +} + +static inline zig_u128 zig_msvc_atomic_load_u128(zig_u128 volatile* obj) { + zig_u128 expected = zig_make_u128(UINT64_C(0), UINT64_C(0)); + (void)zig_cmpxchg_strong(obj, expected, expected, zig_memory_order_seq_cst, zig_memory_order_seq_cst, u128, zig_u128); + 
return expected; +} + +static inline void zig_msvc_atomic_store_u128(zig_u128 volatile* obj, zig_u128 arg) { + zig_u128 expected = zig_make_u128(UINT64_C(0), UINT64_C(0)); + while (!zig_cmpxchg_weak(obj, expected, arg, zig_memory_order_seq_cst, zig_memory_order_seq_cst, u128, zig_u128)); +} + +static inline bool zig_msvc_cmpxchg_i128(zig_i128 volatile* obj, zig_i128* expected, zig_i128 desired) { + return _InterlockedCompareExchange128((__int64 volatile*)obj, (__int64)zig_hi_i128(desired), (__int64)zig_lo_i128(desired), (__int64*)expected); +} + +static inline zig_i128 zig_msvc_atomic_load_i128(zig_i128 volatile* obj) { + zig_i128 expected = zig_make_i128(INT64_C(0), UINT64_C(0)); + (void)zig_cmpxchg_strong(obj, expected, expected, zig_memory_order_seq_cst, zig_memory_order_seq_cst, i128, zig_i128); + return expected; +} + +static inline void zig_msvc_atomic_store_i128(zig_i128 volatile* obj, zig_i128 arg) { + zig_i128 expected = zig_make_i128(INT64_C(0), UINT64_C(0)); + while (!zig_cmpxchg_weak(obj, expected, arg, zig_memory_order_seq_cst, zig_memory_order_seq_cst, i128, zig_i128)); +} + +#endif /* zig_x86_32 */ + +#endif /* !zig_c11_atomics && zig_msvc && zig_x86 */ + +/* ======================== Special Case Intrinsics ========================= */ + +#if defined(zig_msvc) +#include +#endif + +#if defined(zig_thumb) + +static inline void* zig_thumb_windows_teb(void) { + void* teb = 0; +#if defined(zig_msvc) + teb = (void*)_MoveFromCoprocessor(15, 0, 13, 0, 2); +#elif defined(zig_gnuc_asm) + __asm__ ("mrc p15, 0, %[ptr], c13, c0, 2" : [ptr] "=r" (teb)); +#endif + return teb; +} + +#elif defined(zig_aarch64) + +static inline void* zig_aarch64_windows_teb(void) { + void* teb = 0; +#if defined(zig_msvc) + teb = (void*)__readx18qword(0x0); +#elif defined(zig_gnuc_asm) + __asm__ ("mov %[ptr], x18" : [ptr] "=r" (teb)); +#endif + return teb; +} + +#elif defined(zig_x86_32) + +static inline void* zig_x86_windows_teb(void) { + void* teb = 0; +#if defined(zig_msvc) + teb = 
(void*)__readfsdword(0x18); +#elif defined(zig_gnuc_asm) + __asm__ ("movl %%fs:0x18, %[ptr]" : [ptr] "=r" (teb)); +#endif + return teb; +} + +#elif defined(zig_x86_64) + +static inline void* zig_x86_64_windows_teb(void) { + void* teb = 0; +#if defined(zig_msvc) + teb = (void*)__readgsqword(0x30); +#elif defined(zig_gnuc_asm) + __asm__ ("movq %%gs:0x30, %[ptr]" : [ptr] "=r" (teb)); +#endif + return teb; +} + +#endif + +#if defined(zig_x86) + +static inline void zig_x86_cpuid(uint32_t leaf_id, uint32_t subid, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) { +#if defined(zig_msvc) + int cpu_info[4]; + __cpuidex(cpu_info, leaf_id, subid); + *eax = (uint32_t)cpu_info[0]; + *ebx = (uint32_t)cpu_info[1]; + *ecx = (uint32_t)cpu_info[2]; + *edx = (uint32_t)cpu_info[3]; +#elif defined(zig_gnuc_asm) + __asm__("cpuid" : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) : "a"(leaf_id), "c"(subid)); +#else + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; +#endif +} + +static inline uint32_t zig_x86_get_xcr0(void) { +#if defined(zig_msvc) + return (uint32_t)_xgetbv(0); +#elif defined(zig_gnuc_asm) + uint32_t eax; + uint32_t edx; + __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0)); + return eax; +#else + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; +#endif +} + +#endif diff --git a/testing/capi/vendor.sh b/testing/capi/vendor.sh new file mode 100755 index 00000000..75a3eb6c --- /dev/null +++ b/testing/capi/vendor.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -euo pipefail + +INSTRUMENT_HOOKS_COMMIT="d094ae4d6aa0be555a1016cfbbea74e34b0ed555" +INSTRUMENT_HOOKS_URL="https://github.com/CodSpeedHQ/instrument-hooks/archive/${INSTRUMENT_HOOKS_COMMIT}.tar.gz" +VENDOR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +TEMP_DIR=$(mktemp -d) + +rm -rf "${VENDOR_DIR}/instrument-hooks" + +# Download and extract the instrument-hooks library +curl -L "${INSTRUMENT_HOOKS_URL}" -o "${TEMP_DIR}/instrument-hooks.tar.gz" +tar -xzf "${TEMP_DIR}/instrument-hooks.tar.gz" -C "${TEMP_DIR}" + 
+# Copy only the dist and includes directories to the vendor directory +mkdir -p "${VENDOR_DIR}/instrument-hooks/" +cp -r "${TEMP_DIR}/instrument-hooks-${INSTRUMENT_HOOKS_COMMIT}/dist" "${VENDOR_DIR}/instrument-hooks/" +cp -r "${TEMP_DIR}/instrument-hooks-${INSTRUMENT_HOOKS_COMMIT}/includes" "${VENDOR_DIR}/instrument-hooks/" + +# Clean up +rm -rf "${TEMP_DIR}" From 16cedc0ba2c4aa0600a67d9b344f74662a2e0f83 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Wed, 27 Aug 2025 18:08:29 +0200 Subject: [PATCH 03/17] feat: add perf support --- go-runner/src/runner.rs | 17 +++++-- testing/capi/instrument-hooks.go | 81 ++++++++++++++++++++++++++++++++ testing/testing/benchmark.go | 25 ++++++++++ testing/testing/testing.go | 3 -- 4 files changed, 119 insertions(+), 7 deletions(-) create mode 100644 testing/capi/instrument-hooks.go diff --git a/go-runner/src/runner.rs b/go-runner/src/runner.rs index 20440e1c..353dddb7 100644 --- a/go-runner/src/runner.rs +++ b/go-runner/src/runner.rs @@ -13,11 +13,20 @@ pub fn run>(runner_go_path: P, run_args: &[&str]) -> anyhow::Resu module_root ); - // Run go run -tags=codspeed {args} + // Set the integration version in our testing library + let ldflags = format!( + "-X github.com/CodSpeedHQ/codspeed-go/testing/capi.integrationVersion={}", + env!("CARGO_PKG_VERSION") + ); + let args = vec![ + "run", + "-tags=codspeed", + "-ldflags", + &ldflags, + relative_path.to_str().unwrap(), + ]; let output = Command::new("go") - .arg("run") - .arg("-tags=codspeed") - .arg(relative_path) + .args(&args) .args(run_args) .current_dir(module_root) .stdout(std::process::Stdio::inherit()) diff --git a/testing/capi/instrument-hooks.go b/testing/capi/instrument-hooks.go new file mode 100644 index 00000000..7c03e85c --- /dev/null +++ b/testing/capi/instrument-hooks.go @@ -0,0 +1,81 @@ +package capi + +/* +#cgo CFLAGS: -I${SRCDIR}/instrument-hooks/includes +#include "instrument-hooks/dist/core.c" +typedef struct instruments_root_InstrumentHooks__547 
InstrumentHooks; +*/ +import "C" +import ( + "runtime" + "unsafe" +) + +// This will be set in the go-runner +var integrationVersion = "dev" + +type InstrumentHooks struct { + hooks *C.InstrumentHooks +} + +func NewInstrumentHooks() *InstrumentHooks { + inst := &InstrumentHooks{ + hooks: C.instrument_hooks_init(), + } + runtime.SetFinalizer(inst, (*InstrumentHooks).cleanup) + inst.SetIntegration("codspeed-go", integrationVersion) + return inst +} + +func (i *InstrumentHooks) Close() { + if i.hooks != nil { + C.instrument_hooks_deinit(i.hooks) + i.hooks = nil + runtime.SetFinalizer(i, nil) + } +} + +func (i *InstrumentHooks) cleanup() { + i.Close() +} + +func (i *InstrumentHooks) SetIntegration(name, version string) { + if i.hooks == nil { + return + } + nameC := C.CString(name) + versionC := C.CString(version) + defer C.free(unsafe.Pointer(nameC)) + defer C.free(unsafe.Pointer(versionC)) + + C.instrument_hooks_set_integration(i.hooks, (*C.uint8_t)(unsafe.Pointer(nameC)), (*C.uint8_t)(unsafe.Pointer(versionC))) +} + +func (i *InstrumentHooks) StartBenchmark() { + if i.hooks != nil { + C.instrument_hooks_start_benchmark(i.hooks) + } +} + +func (i *InstrumentHooks) StopBenchmark() { + if i.hooks != nil { + C.instrument_hooks_stop_benchmark(i.hooks) + } +} + +func (i *InstrumentHooks) SetExecutedBenchmark(pid uint32, name string) { + if i.hooks == nil { + return + } + nameC := C.CString(name) + defer C.free(unsafe.Pointer(nameC)) + + C.instrument_hooks_set_executed_benchmark(i.hooks, C.uint(pid), (*C.uint8_t)(unsafe.Pointer(nameC))) +} + +func (i *InstrumentHooks) IsInstrumented() bool { + if i.hooks == nil { + return false + } + return bool(C.instrument_hooks_is_instrumented(i.hooks)) +} diff --git a/testing/testing/benchmark.go b/testing/testing/benchmark.go index 0aec2f8c..ac7b40d4 100644 --- a/testing/testing/benchmark.go +++ b/testing/testing/benchmark.go @@ -24,6 +24,7 @@ import ( "time" "unicode" + "github.com/CodSpeedHQ/codspeed-go/testing/capi" 
"github.com/CodSpeedHQ/codspeed-go/testing/internal/sysinfo" ) @@ -83,6 +84,13 @@ type InternalBenchmark struct { F func(b *B) } +type codspeed struct { + instrument_hooks *capi.InstrumentHooks + + codspeedTimePerRoundNs []time.Duration + codspeedItersPerRound []int64 +} + // B is a type passed to [Benchmark] functions to manage benchmark // timing and control the number of iterations. // @@ -98,6 +106,7 @@ type InternalBenchmark struct { // affecting benchmark results. type B struct { common + codspeed importPath string // import path of the package containing the benchmark bstate *benchState N int @@ -388,9 +397,11 @@ func (b *B) launch() { roundN = benchN / int(rounds) } + b.codspeed.instrument_hooks.StartBenchmark() for range rounds { b.runN(int(roundN)) } + b.codspeed.instrument_hooks.StopBenchmark() } } b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.codspeedTimePerRoundNs, b.codspeedItersPerRound, b.extra} @@ -431,6 +442,7 @@ func (b *B) stopOrScaleBLoop() bool { if t >= b.benchTime.d { // Stop the timer so we don't count cleanup time b.StopTimer() + b.codspeed.instrument_hooks.StopBenchmark() // Commit iteration count b.N = int(b.loop.n) b.loop.done = true @@ -472,6 +484,7 @@ func (b *B) loopSlowPath() bool { b.codspeedItersPerRound = make([]int64, 0) b.codspeedTimePerRoundNs = make([]time.Duration, 0) + b.codspeed.instrument_hooks.StartBenchmark() b.ResetTimer() b.StartTimer() return true @@ -486,6 +499,7 @@ func (b *B) loopSlowPath() bool { return true } b.StopTimer() + b.codspeed.instrument_hooks.StopBenchmark() // Commit iteration count b.N = int(b.loop.n) b.loop.done = true @@ -754,6 +768,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e w: os.Stdout, bench: true, }, + codspeed: codspeed{ + instrument_hooks: capi.NewInstrumentHooks(), + }, importPath: importPath, benchFunc: func(b *B) { for _, Benchmark := range bs { @@ -763,6 +780,8 @@ func runBenchmarks(importPath string, matchString 
func(pat, str string) (bool, e benchTime: benchTime, bstate: bstate, } + defer main.codspeed.instrument_hooks.Close() + if Verbose() { main.chatty = newChattyPrinter(main.w) } @@ -791,6 +810,7 @@ func (s *benchState) processBench(b *B) { chatty: b.chatty, bench: true, }, + codspeed: b.codspeed, benchFunc: b.benchFunc, benchTime: b.benchTime, } @@ -875,6 +895,10 @@ func (s *benchState) processBench(b *B) { continue } defer file.Close() + + // Send pid and executed benchmark to the runner + b.codspeed.instrument_hooks.SetExecutedBenchmark(uint32(os.Getpid()), customBenchName) + // END CODSPEED // ############################################################################################ @@ -938,6 +962,7 @@ func (b *B) Run(name string, f func(b *B)) bool { chatty: b.chatty, bench: true, }, + codspeed: b.codspeed, importPath: b.importPath, benchFunc: f, benchTime: b.benchTime, diff --git a/testing/testing/testing.go b/testing/testing/testing.go index 34b5b660..899f9048 100644 --- a/testing/testing/testing.go +++ b/testing/testing/testing.go @@ -666,9 +666,6 @@ type common struct { ctx context.Context cancelCtx context.CancelFunc - - codspeedTimePerRoundNs []time.Duration - codspeedItersPerRound []int64 } // Short reports whether the -test.short flag is set. From 497652f0754971dbdfb5cef46fd3c5bc7f13d1db Mon Sep 17 00:00:00 2001 From: not-matthias Date: Fri, 29 Aug 2025 10:37:28 +0200 Subject: [PATCH 04/17] feat: add codspeed root frame --- testing/testing/benchmark.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testing/testing/benchmark.go b/testing/testing/benchmark.go index ac7b40d4..146e8208 100644 --- a/testing/testing/benchmark.go +++ b/testing/testing/benchmark.go @@ -210,6 +210,11 @@ func (b *B) ReportAllocs() { // runN runs a single benchmark for the specified number of iterations. 
func (b *B) runN(n int) { + b.__codspeed_root_frame__runN(n) +} + +//go:noinline +func (b *B) __codspeed_root_frame__runN(n int) { benchmarkLock.Lock() defer benchmarkLock.Unlock() ctx, cancelCtx := context.WithCancel(context.Background()) From 91114db75d6f991761918b1b41bbb988a87e14a9 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Thu, 28 Aug 2025 20:08:55 +0200 Subject: [PATCH 05/17] fix(go-runner): build binary, then run it This is needed so that the runner is able to dump the symbols of the benchmark process. It also makes it easier testable as we can run the binary without go-runner at a later point. --- go-runner/src/builder/mod.rs | 57 +++++++++++++++++++ go-runner/src/lib.rs | 7 ++- go-runner/src/runner.rs | 42 ++++++-------- ...results_snapshots@golang-benchmarks_4.snap | 2 +- 4 files changed, 79 insertions(+), 29 deletions(-) diff --git a/go-runner/src/builder/mod.rs b/go-runner/src/builder/mod.rs index d53e94b7..77b7e338 100644 --- a/go-runner/src/builder/mod.rs +++ b/go-runner/src/builder/mod.rs @@ -4,3 +4,60 @@ pub mod templater; pub mod verifier; pub use discovery::*; + +use crate::prelude::*; +use std::{path::Path, process::Command}; + +/// Builds a Go runner file into an executable binary +pub fn build_binary>(runner_go_path: P) -> anyhow::Result { + let runner_go_path = runner_go_path.as_ref(); + let file_dir = runner_go_path.parent().unwrap(); + let module_root = file_dir.parent().unwrap(); + let relative_path = runner_go_path.strip_prefix(module_root).unwrap(); + + debug!( + "Building codspeed runner binary: {:?} (root = {:?})", + module_root.join(relative_path), + module_root + ); + + let binary_path = runner_go_path.with_extension("bin"); + + // Set the integration version in our testing library and include debug symbols + let ldflags = format!( + "-X github.com/CodSpeedHQ/codspeed-go/testing/capi.integrationVersion={} -s=false -w=false", + env!("CARGO_PKG_VERSION") + ); + + let args = vec![ + "build", + "-tags=codspeed", + "-ldflags", + 
&ldflags, + "-o", + binary_path.to_str().unwrap(), + relative_path.to_str().unwrap(), + ]; + + let output = Command::new("go") + .args(&args) + .current_dir(module_root) + .output() + .context("Failed to execute go build command")?; + + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + warn!("Build command output: {stdout}"); + warn!("Build command error output: {stderr}"); + + bail!( + "Failed to build benchmark binary. Exit status: {}", + output.status + ); + } + + debug!("Successfully built binary: {binary_path:?}"); + Ok(binary_path) +} diff --git a/go-runner/src/lib.rs b/go-runner/src/lib.rs index 726aaae4..718c173a 100644 --- a/go-runner/src/lib.rs +++ b/go-runner/src/lib.rs @@ -37,11 +37,14 @@ pub fn run_benchmarks(project_dir: &Path, bench: &str) -> anyhow::Result<()> { info!("Found {name:30} in {path:?}"); } - // 2. Generate codspeed runners and execute them + // 2. Generate codspeed runners, build binaries, and execute them for package in &packages { info!("Generating custom runner for package: {}", package.name); let (_target_dir, runner_path) = builder::templater::run(package)?; + info!("Building binary for package: {}", package.name); + let binary_path = builder::build_binary(&runner_path)?; + let args = [ "-test.bench", bench, @@ -55,7 +58,7 @@ pub fn run_benchmarks(project_dir: &Path, bench: &str) -> anyhow::Result<()> { ]; info!("Running benchmarks for package: {}", package.name); - runner::run(&runner_path, &args)?; + runner::run(&binary_path, &args)?; } // 3. 
Collect the results diff --git a/go-runner/src/runner.rs b/go-runner/src/runner.rs index 353dddb7..f14862de 100644 --- a/go-runner/src/runner.rs +++ b/go-runner/src/runner.rs @@ -1,38 +1,28 @@ use crate::prelude::*; use std::{path::Path, process::Command}; -pub fn run>(runner_go_path: P, run_args: &[&str]) -> anyhow::Result<()> { - // Extract the directory containing runner.go to use as working directory - let runner_go_path = runner_go_path.as_ref(); - let file_dir = runner_go_path.parent().unwrap(); - let module_root = file_dir.parent().unwrap(); - let relative_path = runner_go_path.strip_prefix(module_root).unwrap(); - debug!( - "Building codspeed runner: {:?} (root = {:?})", - module_root.join(relative_path), - module_root - ); +pub fn run>(binary_path: P, run_args: &[&str]) -> anyhow::Result<()> { + let binary_path = binary_path.as_ref(); + debug!("Running codspeed benchmark binary: {binary_path:?}"); - // Set the integration version in our testing library - let ldflags = format!( - "-X github.com/CodSpeedHQ/codspeed-go/testing/capi.integrationVersion={}", - env!("CARGO_PKG_VERSION") - ); - let args = vec![ - "run", - "-tags=codspeed", - "-ldflags", - &ldflags, - relative_path.to_str().unwrap(), - ]; - let output = Command::new("go") - .args(&args) + // Execute it from the folder with the benchmarks: + // ``` + // benches/ <-- module_root + // codspeed/ + // runner.go + // runner.bin <-- binary_path + // foo_test.go + // fib_test.go + // ``` + let module_root = binary_path.parent().unwrap().parent().unwrap(); + + let output = Command::new(binary_path) .args(run_args) .current_dir(module_root) .stdout(std::process::Stdio::inherit()) .stderr(std::process::Stdio::inherit()) .output() - .context("Failed to execute go build command")?; + .context("Failed to execute benchmark binary")?; if !output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); diff --git 
a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@golang-benchmarks_4.snap b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@golang-benchmarks_4.snap index 79fcb07b..e7a894cb 100644 --- a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@golang-benchmarks_4.snap +++ b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@golang-benchmarks_4.snap @@ -1,5 +1,5 @@ --- -source: src/integration_tests.rs +source: go-runner/src/integration_tests.rs expression: content --- { From e73b6479ef6563360aed5b46fdec1a6506177010 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Thu, 28 Aug 2025 20:09:10 +0200 Subject: [PATCH 06/17] chore: use resolver v3 for workspace --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 7669d12d..e12585a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,2 +1,3 @@ [workspace] +resolver = "3" members = ["go-runner"] From 6d9f40340e5af948b0682b4f0b3200b457942065 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Fri, 29 Aug 2025 16:45:17 +0200 Subject: [PATCH 07/17] feat(test): run latest version of codspeed-go in PR --- go-runner/src/builder/patcher.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/go-runner/src/builder/patcher.rs b/go-runner/src/builder/patcher.rs index 3e749d47..2eacf148 100644 --- a/go-runner/src/builder/patcher.rs +++ b/go-runner/src/builder/patcher.rs @@ -5,6 +5,16 @@ use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; +fn codspeed_go_version() -> anyhow::Result { + // When running in Github Actions, we always want to use the latest + // codspeed-go package. For this, we have to use the current branch. 
+ if std::env::var("GITHUB_ACTIONS").is_ok() { + std::env::var("GITHUB_SHA").context("Couldn't find GITHUB_SHA") + } else { + Ok(format!("v{}", env!("CARGO_PKG_VERSION"))) + } +} + pub fn patch_imports>( folder: P, files_to_patch: Vec, @@ -31,8 +41,10 @@ pub fn patch_imports>( debug!("Patched {patched_files} files"); // 2. Update the go module to use the codspeed package - let version = env!("CARGO_PKG_VERSION"); - let pkg = format!("github.com/CodSpeedHQ/codspeed-go@v{version}"); + let pkg = format!( + "github.com/CodSpeedHQ/codspeed-go@{}", + codspeed_go_version()? + ); debug!("Installing {pkg}"); let mut cmd: Command = Command::new("go"); From 219b86fd0ed665b8c1e75b67d2e38907fcf95a25 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Fri, 29 Aug 2025 17:17:25 +0200 Subject: [PATCH 08/17] feat(go-runner): use local codspeed-go pkg in CI and for tests --- go-runner/src/builder/patcher.rs | 40 +++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/go-runner/src/builder/patcher.rs b/go-runner/src/builder/patcher.rs index 2eacf148..8e5bf402 100644 --- a/go-runner/src/builder/patcher.rs +++ b/go-runner/src/builder/patcher.rs @@ -5,14 +5,28 @@ use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; -fn codspeed_go_version() -> anyhow::Result { - // When running in Github Actions, we always want to use the latest - // codspeed-go package. For this, we have to use the current branch. 
- if std::env::var("GITHUB_ACTIONS").is_ok() { - std::env::var("GITHUB_SHA").context("Couldn't find GITHUB_SHA") - } else { - Ok(format!("v{}", env!("CARGO_PKG_VERSION"))) +pub fn replace_pkg>(folder: P) -> anyhow::Result<()> { + let codspeed_root = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap(); + let replace_arg = format!( + "github.com/CodSpeedHQ/codspeed-go={}", + codspeed_root.display() + ); + debug!("Replacing codspeed-go with {}", codspeed_root.display()); + + let output = Command::new("go") + .args(["mod", "edit", "-replace", &replace_arg]) + .current_dir(folder.as_ref()) + .output() + .context("Failed to execute 'go mod edit' command")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + bail!("Failed to add replace directive: {}", stderr); } + + debug!("Added local replace directive to go.mod"); + + Ok(()) } pub fn patch_imports>( @@ -41,10 +55,8 @@ pub fn patch_imports>( debug!("Patched {patched_files} files"); // 2. Update the go module to use the codspeed package - let pkg = format!( - "github.com/CodSpeedHQ/codspeed-go@{}", - codspeed_go_version()? - ); + let version = format!("v{}", env!("CARGO_PKG_VERSION")); + let pkg = format!("github.com/CodSpeedHQ/codspeed-go@{version}"); debug!("Installing {pkg}"); let mut cmd: Command = Command::new("go"); @@ -63,6 +75,12 @@ pub fn patch_imports>( debug!("Successfully installed codspeed-go dependency"); + // Ensure we have the latest codspeed-go package installed. Just + // use the local one which might contain uncommitted changes. 
+ if std::env::var("GITHUB_ACTIONS").is_ok() || cfg!(test) { + replace_pkg(folder)?; + } + Ok(()) } From 5177428159f84094b0e1c5e24a20f95f1aed8476 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Tue, 2 Sep 2025 11:19:58 +0200 Subject: [PATCH 09/17] feat(ci): specify codspeed mode --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2be77baa..9bfffe5c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,6 +47,7 @@ jobs: - name: Run the benchmarks uses: CodSpeedHQ/action@main with: + mode: walltime working-directory: example run: cargo r --manifest-path ../go-runner/Cargo.toml -- test -bench=. From 9e8cd9e33e4d93fac0d031f919774fcc5123e7d9 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Fri, 29 Aug 2025 17:47:53 +0200 Subject: [PATCH 10/17] refactor: running with codspeed message --- README.md | 2 +- testing/testing/benchmark.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 93c1cb56..215fb002 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ $ go-runner test -bench=. [INFO go_runner] Found BenchmarkFibonacci20_Loop in "fib_test.go" [INFO go_runner] Generating custom runner for package: example [INFO go_runner] Running benchmarks for package: example -Running with CodSpeed instrumentation +Running with CodSpeed (mode: walltime) goos: linux goarch: amd64 cpu: 12th Gen Intel(R) Core(TM) i7-1260P @ 1672.130MHz diff --git a/testing/testing/benchmark.go b/testing/testing/benchmark.go index 146e8208..2abb06d8 100644 --- a/testing/testing/benchmark.go +++ b/testing/testing/benchmark.go @@ -296,7 +296,7 @@ var labelsOnce sync.Once // subbenchmarks. b must not have subbenchmarks. 
func (b *B) run() { labelsOnce.Do(func() { - fmt.Fprintf(b.w, "Running with CodSpeed instrumentation\n") + fmt.Fprintf(b.w, "Running with CodSpeed (mode: walltime)\n") fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS) fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH) From 9722623a6bd5b9620e86356510e3b47803a67a31 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Fri, 29 Aug 2025 18:15:21 +0200 Subject: [PATCH 11/17] fix(testing): hide _codspeed.go filenames --- go-runner/src/lib.rs | 4 +- go-runner/src/runner.rs | 38 +++++++++++----- go-runner/tests/error_file.in/error_test.go | 7 +++ go-runner/tests/error_file.in/go.mod | 3 ++ go-runner/tests/error_file.rs | 17 +++++++ go-runner/tests/utils.rs | 50 +++++++++++++++++++++ testing/testing/testing.go | 5 +++ 7 files changed, 112 insertions(+), 12 deletions(-) create mode 100644 go-runner/tests/error_file.in/error_test.go create mode 100644 go-runner/tests/error_file.in/go.mod create mode 100644 go-runner/tests/error_file.rs create mode 100644 go-runner/tests/utils.rs diff --git a/go-runner/src/lib.rs b/go-runner/src/lib.rs index 718c173a..60969525 100644 --- a/go-runner/src/lib.rs +++ b/go-runner/src/lib.rs @@ -4,11 +4,11 @@ use std::{ path::{Path, PathBuf}, }; -mod builder; +pub mod builder; pub mod cli; pub mod prelude; mod results; -mod runner; +pub mod runner; pub(crate) mod utils; #[cfg(test)] diff --git a/go-runner/src/runner.rs b/go-runner/src/runner.rs index f14862de..37c142b3 100644 --- a/go-runner/src/runner.rs +++ b/go-runner/src/runner.rs @@ -1,7 +1,7 @@ use crate::prelude::*; use std::{path::Path, process::Command}; -pub fn run>(binary_path: P, run_args: &[&str]) -> anyhow::Result<()> { +fn run_cmd>(binary_path: P, run_args: &[&str]) -> anyhow::Result { let binary_path = binary_path.as_ref(); debug!("Running codspeed benchmark binary: {binary_path:?}"); @@ -15,15 +15,12 @@ pub fn run>(binary_path: P, run_args: &[&str]) -> anyhow::Result< // fib_test.go // ``` let module_root = 
binary_path.parent().unwrap().parent().unwrap(); + let mut cmd = Command::new(binary_path); + cmd.args(run_args).current_dir(module_root); + Ok(cmd) +} - let output = Command::new(binary_path) - .args(run_args) - .current_dir(module_root) - .stdout(std::process::Stdio::inherit()) - .stderr(std::process::Stdio::inherit()) - .output() - .context("Failed to execute benchmark binary")?; - +fn check_success(output: &std::process::Output) -> anyhow::Result { if !output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); let stderr = String::from_utf8_lossy(&output.stderr); @@ -33,6 +30,27 @@ pub fn run>(binary_path: P, run_args: &[&str]) -> anyhow::Result< bail!("Failed to run benchmark. Exit status: {}", output.status); } + Ok(String::from_utf8_lossy(&output.stdout).to_string()) +} + +/// Runs the cmd and returns the output. +pub fn run_with_stdout>( + binary_path: P, + run_args: &[&str], +) -> anyhow::Result { + let mut cmd = run_cmd(binary_path, run_args)?; + let output = cmd.output().context("Failed to execute go build command")?; + check_success(&output) +} + +/// Runs the cmd and forwards the output to stdout/stderr. 
+pub fn run>(binary_path: P, run_args: &[&str]) -> anyhow::Result<()> { + let mut cmd = run_cmd(binary_path, run_args)?; + let output = cmd + .stdout(std::process::Stdio::inherit()) + .stderr(std::process::Stdio::inherit()) + .output() + .context("Failed to execute go build command")?; - Ok(()) + check_success(&output).map(|_| ()) } diff --git a/go-runner/tests/error_file.in/error_test.go b/go-runner/tests/error_file.in/error_test.go new file mode 100644 index 00000000..273de8f2 --- /dev/null +++ b/go-runner/tests/error_file.in/error_test.go @@ -0,0 +1,7 @@ +package error_file + +import "testing" + +func BenchmarkErrorFile(b *testing.B) { + b.Error("this_should_be_in_stdout") +} diff --git a/go-runner/tests/error_file.in/go.mod b/go-runner/tests/error_file.in/go.mod new file mode 100644 index 00000000..995513a5 --- /dev/null +++ b/go-runner/tests/error_file.in/go.mod @@ -0,0 +1,3 @@ +module example + +go 1.24.5 diff --git a/go-runner/tests/error_file.rs b/go-runner/tests/error_file.rs new file mode 100644 index 00000000..364788e6 --- /dev/null +++ b/go-runner/tests/error_file.rs @@ -0,0 +1,17 @@ +pub mod utils; + +use utils::run_with_args; + +#[test] +pub fn test_error_has_test_filename() { + let stdout = run_with_args( + "tests/error_file.in", + &["-test.bench", "BenchmarkErrorFile", "-test.benchtime", "1x"], + ) + .unwrap(); + + eprintln!("Error output: {stdout}"); + assert!(stdout.contains("this_should_be_in_stdout")); + assert!(stdout.contains("error_test.go")); + assert!(!stdout.contains("error_codspeed.go")); +} diff --git a/go-runner/tests/utils.rs b/go-runner/tests/utils.rs new file mode 100644 index 00000000..2f34cca3 --- /dev/null +++ b/go-runner/tests/utils.rs @@ -0,0 +1,50 @@ +use codspeed_go_runner::{builder, builder::BenchmarkPackage, cli::Cli, runner}; +use std::path::Path; + +/// Helper function to run a single package with arguments +pub fn run_package_with_args(package: &BenchmarkPackage, args: &[&str]) -> anyhow::Result { + let (_dir, 
runner_path) = builder::templater::run(package)?; + let binary_path = builder::build_binary(&runner_path)?; + runner::run_with_stdout(&binary_path, args) +} + +/// Helper function to run tests in a directory with specific arguments +pub fn run_with_args>(dir: P, args: &[&str]) -> anyhow::Result { + assert!(dir.as_ref().exists()); + + let packages = BenchmarkPackage::from_project(dir.as_ref(), &["./...".to_string()])?; + assert_eq!(packages.len(), 1); + + run_package_with_args(&packages[0], args) +} + +/// Helper function to run a single package using CLI configuration +pub fn run_with_cli>(dir: P, cli: &Cli) -> anyhow::Result { + assert!(dir.as_ref().exists()); + + let packages = BenchmarkPackage::from_project(dir.as_ref(), &cli.packages)?; + assert_eq!( + packages.len(), + 1, + "Currently only single package is supported" + ); + + let args = ["-test.bench", &cli.bench, "-test.benchtime", &cli.benchtime]; + run_package_with_args(&packages[0], &args) +} + +/// Helper function to run multiple packages using CLI configuration +pub fn run_with_cli_multi>(dir: P, cli: &Cli) -> anyhow::Result { + assert!(dir.as_ref().exists()); + + let packages = BenchmarkPackage::from_project(dir.as_ref(), &cli.packages)?; + + let mut all_stdout = String::new(); + for package in &packages { + let args = ["-test.bench", &cli.bench, "-test.benchtime", &cli.benchtime]; + let stdout = run_package_with_args(package, &args)?; + all_stdout.push_str(&stdout); + } + + Ok(all_stdout) +} diff --git a/testing/testing/testing.go b/testing/testing/testing.go index 899f9048..4d0a8668 100644 --- a/testing/testing/testing.go +++ b/testing/testing/testing.go @@ -811,6 +811,11 @@ func (c *common) decorate(s string, skip int) string { } else if index := strings.LastIndexAny(file, `/\`); index >= 0 { file = file[index+1:] } + + // Replace _codspeed.go with _test.go for better user experience + if strings.HasSuffix(file, "_codspeed.go") { + file = strings.TrimSuffix(file, "_codspeed.go") + "_test.go" + } } 
else { file = "???" } From ab6765ad27f98bd3694361d6696c974d273b62ad Mon Sep 17 00:00:00 2001 From: not-matthias Date: Fri, 29 Aug 2025 18:26:49 +0200 Subject: [PATCH 12/17] feat(go-runner): support benchtime and pkg args --- go-runner/src/builder/discovery.rs | 24 ++++-- go-runner/src/cli.rs | 97 +++++++++++++++++----- go-runner/src/integration_tests.rs | 6 +- go-runner/src/lib.rs | 22 ++--- go-runner/src/main.rs | 2 +- go-runner/tests/pkg_arg.in/bar/bar_test.go | 19 +++++ go-runner/tests/pkg_arg.in/foo/foo_test.go | 19 +++++ go-runner/tests/pkg_arg.in/go.mod | 3 + go-runner/tests/pkg_arg.rs | 54 ++++++++++++ go-runner/tests/utils.rs | 10 +++ 10 files changed, 209 insertions(+), 47 deletions(-) create mode 100644 go-runner/tests/pkg_arg.in/bar/bar_test.go create mode 100644 go-runner/tests/pkg_arg.in/foo/foo_test.go create mode 100644 go-runner/tests/pkg_arg.in/go.mod create mode 100644 go-runner/tests/pkg_arg.rs diff --git a/go-runner/src/builder/discovery.rs b/go-runner/src/builder/discovery.rs index 29323408..c22fb5fe 100644 --- a/go-runner/src/builder/discovery.rs +++ b/go-runner/src/builder/discovery.rs @@ -233,8 +233,11 @@ impl BenchmarkPackage { } } - pub fn from_project(go_project_path: &Path) -> anyhow::Result> { - let raw_packages = Self::run_go_list(go_project_path)?; + pub fn from_project( + go_project_path: &Path, + packages: &[String], + ) -> anyhow::Result> { + let raw_packages = Self::run_go_list(go_project_path, packages)?; let has_test_files = |files: &Vec| files.iter().any(|name| name.ends_with("_test.go")); let has_test_imports = |imports: &Vec| { @@ -297,10 +300,13 @@ impl BenchmarkPackage { Ok(packages) } - fn run_go_list(go_project_path: &Path) -> anyhow::Result> { - // Execute 'go list -test -compiled -json ./...' 
to get package information + fn run_go_list(go_project_path: &Path, packages: &[String]) -> anyhow::Result> { + // Execute 'go list -test -compiled -json ' to get package information + let mut args = vec!["list", "-test", "-compiled", "-json"]; + args.extend(packages.iter().map(|s| s.as_str())); + let output = Command::new("go") - .args(["list", "-test", "-compiled", "-json", "./..."]) + .args(args) .current_dir(go_project_path) .output()?; @@ -333,9 +339,11 @@ mod tests { #[test] fn test_discover_benchmarks() { - let packages = - BenchmarkPackage::from_project(Path::new("testdata/projects/golang-benchmarks")) - .unwrap(); + let packages = BenchmarkPackage::from_project( + Path::new("testdata/projects/golang-benchmarks"), + &["./...".to_string()], + ) + .unwrap(); insta::assert_json_snapshot!(packages, { ".**[\"Dir\"]" => "[package_dir]", diff --git a/go-runner/src/cli.rs b/go-runner/src/cli.rs index 8b6d93d3..8bc2e2de 100644 --- a/go-runner/src/cli.rs +++ b/go-runner/src/cli.rs @@ -10,6 +10,22 @@ pub enum CliExit { pub struct Cli { /// Run only benchmarks matching regexp pub bench: String, + + /// Run each benchmark for duration d (e.g., '3s') + pub benchtime: String, + + /// Package patterns to run benchmarks for + pub packages: Vec, +} + +impl Default for Cli { + fn default() -> Self { + Self { + bench: ".".into(), + benchtime: "3s".into(), + packages: vec!["./...".into()], + } + } } impl Cli { @@ -24,7 +40,7 @@ impl Cli { } fn parse_args(mut args: impl Iterator) -> Result { - let mut bench = ".".to_string(); + let mut instance = Self::default(); // We currently only support the `test` subcommand. let cmd = args.next(); @@ -41,12 +57,19 @@ impl Cli { The Codspeed Go Benchmark Runner USAGE: - go-runner test [OPTIONS] + go-runner test [OPTIONS] [PACKAGES...] 
OPTIONS: -bench Run only benchmarks matching regexp (defaults to '.') + -benchtime Run each benchmark for duration d (defaults to '3s') -h, --help Print help information - -V, --version Print version information" + -V, --version Print version information + +SUPPORTED FLAGS: + -bench, -benchtime + +UNSUPPORTED FLAGS (will be warned about): + -benchmem, -count, -cpu, -cpuprofile, -memprofile, -trace, etc." ); return Err(CliExit::Help); } @@ -55,35 +78,40 @@ OPTIONS: return Err(CliExit::Version); } "-bench" => { - bench = args.next().ok_or_else(|| { + instance.bench = args.next().ok_or_else(|| { eprintln!("error: `-bench` requires a pattern"); CliExit::MissingArgument })?; } s if s.starts_with("-bench=") => { - bench = s.split_once('=').unwrap().1.to_string(); + instance.bench = s.split_once('=').unwrap().1.to_string(); } - - s if s.starts_with('-') => { - eprintln!("Unknown flag: {s}"); - return Err(CliExit::UnknownFlag); + "-benchtime" => { + instance.benchtime = args.next().ok_or_else(|| { + eprintln!("error: `-benchtime` requires a duration"); + CliExit::MissingArgument + })?; } - - _ => { + s if s.starts_with("-benchtime=") => { + instance.benchtime = s.split_once('=').unwrap().1.to_string(); + } + s if s.starts_with('-') => { eprintln!( - "warning: package arguments are not currently supported, ignoring '{arg}'" + "warning: flag '{s}' is not supported by CodSpeed Go runner, ignoring" ); - // Consume and ignore all remaining arguments - for remaining_arg in args { - eprintln!( - "warning: package arguments are not currently supported, ignoring '{remaining_arg}'" - ); - } + } + _ => { + // Collect package arguments for filtering + instance.packages = { + let mut packages = vec![arg]; + packages.extend(args); + packages + }; break; } } } - Ok(Self { bench }) + Ok(instance) } } @@ -107,6 +135,8 @@ mod tests { fn test_cli_parse_defaults() { let cli = str_to_iter("go-runner test").unwrap(); assert_eq!(cli.bench, "."); + assert_eq!(cli.benchtime, 
Cli::default().benchtime); + assert_eq!(cli.packages, Cli::default().packages); } #[test] @@ -119,9 +149,30 @@ mod tests { } #[test] - fn test_cli_parse_ignores_packages() { + fn test_cli_parse_with_benchtime_flag() { + let cli = str_to_iter("go-runner test -benchtime 3s").unwrap(); + assert_eq!(cli.benchtime, "3s".to_string()); + + let cli = str_to_iter("go-runner test -benchtime=10x").unwrap(); + assert_eq!(cli.benchtime, "10x".to_string()); + } + + #[test] + fn test_cli_parse_with_packages() { let cli = str_to_iter("go-runner test package1 package2").unwrap(); assert_eq!(cli.bench, "."); + assert_eq!( + cli.packages, + vec!["package1".to_string(), "package2".to_string()] + ); + } + + #[test] + fn test_cli_parse_combined_flags() { + let cli = str_to_iter("go-runner test -bench=BenchmarkFoo -benchtime 5s ./pkg").unwrap(); + assert_eq!(cli.bench, "BenchmarkFoo"); + assert_eq!(cli.benchtime, "5s".to_string()); + assert_eq!(cli.packages, vec!["./pkg".to_string()]); } #[test] @@ -147,7 +198,11 @@ mod tests { let result = str_to_iter("go-runner test -bench"); assert!(matches!(result, Err(CliExit::MissingArgument))); + let result = str_to_iter("go-runner test -benchtime"); + assert!(matches!(result, Err(CliExit::MissingArgument))); + + // Unknown flags now generate warnings but don't cause errors let result = str_to_iter("go-runner test -unknown"); - assert!(matches!(result, Err(CliExit::UnknownFlag))); + assert!(result.is_ok()); } } diff --git a/go-runner/src/integration_tests.rs b/go-runner/src/integration_tests.rs index 6f6f75e4..e1bc7d40 100644 --- a/go-runner/src/integration_tests.rs +++ b/go-runner/src/integration_tests.rs @@ -81,7 +81,11 @@ fn test_build_and_run(#[case] project_name: &str) { let profile_dir = temp_dir.path().join("profile"); unsafe { std::env::set_var("CODSPEED_PROFILE_FOLDER", &profile_dir) }; - if let Err(error) = crate::run_benchmarks(temp_dir.path(), ".") { + let cli = crate::cli::Cli { + benchtime: "1x".into(), + ..Default::default() + }; + 
if let Err(error) = crate::run_benchmarks(temp_dir.path(), &cli) { panic!("Benchmarks couldn't run: {error}"); } diff --git a/go-runner/src/lib.rs b/go-runner/src/lib.rs index 60969525..df84c4d2 100644 --- a/go-runner/src/lib.rs +++ b/go-runner/src/lib.rs @@ -15,13 +15,13 @@ pub(crate) mod utils; mod integration_tests; /// Builds and runs the specified Go project benchmarks, writing results to the .codspeed folder. -pub fn run_benchmarks(project_dir: &Path, bench: &str) -> anyhow::Result<()> { +pub fn run_benchmarks(project_dir: &Path, cli: &crate::cli::Cli) -> anyhow::Result<()> { let profile_dir = std::env::var("CODSPEED_PROFILE_FOLDER") .context("CODSPEED_PROFILE_FOLDER env var not set")?; std::fs::remove_dir_all(&profile_dir).ok(); // 1. Build phase - Benchmark and package discovery - let packages = BenchmarkPackage::from_project(project_dir)?; + let packages = BenchmarkPackage::from_project(project_dir, &cli.packages)?; info!("Discovered {} packages", packages.len()); let mut bench_name_to_path = HashMap::new(); @@ -45,20 +45,10 @@ pub fn run_benchmarks(project_dir: &Path, bench: &str) -> anyhow::Result<()> { info!("Building binary for package: {}", package.name); let binary_path = builder::build_binary(&runner_path)?; - let args = [ - "-test.bench", - bench, - // Use a single iteration in tests to speed up execution, otherwise use 5 seconds - "-test.benchtime", - if cfg!(test) || std::env::var("CODSPEED_ENV").is_err() { - "1x" - } else { - "5s" - }, - ]; - - info!("Running benchmarks for package: {}", package.name); - runner::run(&binary_path, &args)?; + runner::run( + &binary_path, + &["-test.bench", &cli.bench, "-test.benchtime", &cli.benchtime], + )?; } // 3. 
Collect the results diff --git a/go-runner/src/main.rs b/go-runner/src/main.rs index 11ad4dff..bd78b8d5 100644 --- a/go-runner/src/main.rs +++ b/go-runner/src/main.rs @@ -9,7 +9,7 @@ fn main() -> anyhow::Result<()> { .init(); let cli = Cli::parse(); - codspeed_go_runner::run_benchmarks(Path::new("."), &cli.bench)?; + codspeed_go_runner::run_benchmarks(Path::new("."), &cli)?; Ok(()) } diff --git a/go-runner/tests/pkg_arg.in/bar/bar_test.go b/go-runner/tests/pkg_arg.in/bar/bar_test.go new file mode 100644 index 00000000..baece0df --- /dev/null +++ b/go-runner/tests/pkg_arg.in/bar/bar_test.go @@ -0,0 +1,19 @@ +package bar + +import "testing" + +func BenchmarkBar1(b *testing.B) { + b.Log("bar_bench_1") + for i := 0; i < b.N; i++ { + // Some work + _ = i * 4 + } +} + +func BenchmarkBar2(b *testing.B) { + b.Log("bar_bench_2") + for i := 0; i < b.N; i++ { + // Some work + _ = i * 5 + } +} diff --git a/go-runner/tests/pkg_arg.in/foo/foo_test.go b/go-runner/tests/pkg_arg.in/foo/foo_test.go new file mode 100644 index 00000000..542c76a2 --- /dev/null +++ b/go-runner/tests/pkg_arg.in/foo/foo_test.go @@ -0,0 +1,19 @@ +package foo + +import "testing" + +func BenchmarkFoo1(b *testing.B) { + b.Log("foo_bench_1") + for i := 0; i < b.N; i++ { + // Some work + _ = i * 2 + } +} + +func BenchmarkFoo2(b *testing.B) { + b.Log("foo_bench_2") + for i := 0; i < b.N; i++ { + // Some work + _ = i * 3 + } +} diff --git a/go-runner/tests/pkg_arg.in/go.mod b/go-runner/tests/pkg_arg.in/go.mod new file mode 100644 index 00000000..995513a5 --- /dev/null +++ b/go-runner/tests/pkg_arg.in/go.mod @@ -0,0 +1,3 @@ +module example + +go 1.24.5 diff --git a/go-runner/tests/pkg_arg.rs b/go-runner/tests/pkg_arg.rs new file mode 100644 index 00000000..0f231e59 --- /dev/null +++ b/go-runner/tests/pkg_arg.rs @@ -0,0 +1,54 @@ +use codspeed_go_runner::cli::Cli; +use utils::{run_with_cli, run_with_cli_multi}; + +pub mod utils; + +#[test] +pub fn test_pkg_arg_filters_correctly() { + let cli = Cli { + bench: 
"BenchmarkBar1".to_string(), + benchtime: "1x".to_string(), + packages: vec!["./bar".to_string()], + }; + let stdout = run_with_cli("tests/pkg_arg.in", &cli).unwrap(); + + // Should contain output from the targeted benchmark + assert!(stdout.contains("bar_bench_1")); + + // Should NOT contain output from other benchmarks + assert!(!stdout.contains("foo_bench_1")); + assert!(!stdout.contains("foo_bench_2")); + assert!(!stdout.contains("bar_bench_2")); +} + +#[test] +pub fn test_pkg_arg_all_packages() { + let cli = Cli { + bench: ".".to_string(), + benchtime: "1x".to_string(), + packages: vec!["./...".to_string()], + }; + let stdout = run_with_cli_multi("tests/pkg_arg.in", &cli).unwrap(); + + // Should contain output from all benchmarks when using ./... + assert!(stdout.contains("foo_bench_1")); + assert!(stdout.contains("foo_bench_2")); + assert!(stdout.contains("bar_bench_1")); + assert!(stdout.contains("bar_bench_2")); +} + +#[test] +pub fn test_pkg_arg_multiple_packages() { + let cli = Cli { + bench: ".".to_string(), + benchtime: "1x".to_string(), + packages: vec!["./foo".to_string(), "./bar".to_string()], + }; + let stdout = run_with_cli_multi("tests/pkg_arg.in", &cli).unwrap(); + + // Should contain output from both foo and bar packages + assert!(stdout.contains("foo_bench_1")); + assert!(stdout.contains("foo_bench_2")); + assert!(stdout.contains("bar_bench_1")); + assert!(stdout.contains("bar_bench_2")); +} diff --git a/go-runner/tests/utils.rs b/go-runner/tests/utils.rs index 2f34cca3..8ab713d1 100644 --- a/go-runner/tests/utils.rs +++ b/go-runner/tests/utils.rs @@ -1,8 +1,18 @@ use codspeed_go_runner::{builder, builder::BenchmarkPackage, cli::Cli, runner}; use std::path::Path; +use std::sync::Mutex; +use tempfile::TempDir; /// Helper function to run a single package with arguments pub fn run_package_with_args(package: &BenchmarkPackage, args: &[&str]) -> anyhow::Result { + // Mutex to prevent concurrent tests from interfering with CODSPEED_PROFILE_FOLDER env 
var + static ENV_MUTEX: Mutex<()> = Mutex::new(()); + let _env_guard = ENV_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); + + let temp_dir = TempDir::new()?; + let profile_dir = temp_dir.path().join("profile"); + unsafe { std::env::set_var("CODSPEED_PROFILE_FOLDER", &profile_dir) }; + let (_dir, runner_path) = builder::templater::run(package)?; let binary_path = builder::build_binary(&runner_path)?; runner::run_with_stdout(&binary_path, args) From f95d2590097205dc236c2250041cb311f63cd5e5 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Mon, 1 Sep 2025 13:15:36 +0200 Subject: [PATCH 13/17] fix(go-runner): run tests in real project dir --- go-runner/src/integration_tests.rs | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/go-runner/src/integration_tests.rs b/go-runner/src/integration_tests.rs index e1bc7d40..cacac729 100644 --- a/go-runner/src/integration_tests.rs +++ b/go-runner/src/integration_tests.rs @@ -6,18 +6,6 @@ use tempfile::TempDir; use crate::results::walltime_results::WalltimeResults; -fn setup_test_project(project_name: &str) -> anyhow::Result { - let project_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("testdata/projects") - .join(project_name); - println!("Project path: {project_path:?}"); - - let temp_dir = TempDir::new()?; - crate::utils::copy_dir_recursively(&project_path, &temp_dir)?; - - Ok(temp_dir) -} - fn assert_results_snapshots(profile_dir: &Path, project_name: &str) { let glob_pattern = profile_dir.join("results"); if !glob_pattern.exists() { @@ -73,19 +61,22 @@ fn assert_results_snapshots(profile_dir: &Path, project_name: &str) { #[case::fuego("fuego")] #[case::cli_runtime("cli-runtime")] fn test_build_and_run(#[case] project_name: &str) { - let temp_dir = setup_test_project(project_name).unwrap(); + let project_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("testdata/projects") + .join(project_name); // Mutex to prevent concurrent tests from interfering with 
CODSPEED_PROFILE_FOLDER env var static ENV_MUTEX: Mutex<()> = Mutex::new(()); let _env_guard = ENV_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); + let temp_dir = TempDir::new().unwrap(); let profile_dir = temp_dir.path().join("profile"); unsafe { std::env::set_var("CODSPEED_PROFILE_FOLDER", &profile_dir) }; let cli = crate::cli::Cli { benchtime: "1x".into(), ..Default::default() }; - if let Err(error) = crate::run_benchmarks(temp_dir.path(), &cli) { + if let Err(error) = crate::run_benchmarks(project_dir.as_path(), &cli) { panic!("Benchmarks couldn't run: {error}"); } From 45e3adb685f0c97c04877eca1514ecc644329c1e Mon Sep 17 00:00:00 2001 From: not-matthias Date: Mon, 1 Sep 2025 13:21:14 +0200 Subject: [PATCH 14/17] fix(go-runner): use git-relative file paths --- go-runner/src/integration_tests.rs | 1 + go-runner/src/lib.rs | 5 +- ...s__assert_results_snapshots@example_0.snap | 203 ++++++++++++++++++ ...s__assert_results_snapshots@example_0.snap | 203 ++++++++++++++++++ go-runner/src/utils.rs | 35 ++- go-runner/testdata/projects/example | 1 + 6 files changed, 446 insertions(+), 2 deletions(-) create mode 100644 go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap create mode 100644 go-runner/src/snapshots/go_runner__integration_tests__assert_results_snapshots@example_0.snap create mode 120000 go-runner/testdata/projects/example diff --git a/go-runner/src/integration_tests.rs b/go-runner/src/integration_tests.rs index cacac729..5f0f5f5b 100644 --- a/go-runner/src/integration_tests.rs +++ b/go-runner/src/integration_tests.rs @@ -60,6 +60,7 @@ fn assert_results_snapshots(profile_dir: &Path, project_name: &str) { // Currently not producing results: #[case::fuego("fuego")] #[case::cli_runtime("cli-runtime")] +#[case::example("example")] fn test_build_and_run(#[case] project_name: &str) { let project_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("testdata/projects") diff --git a/go-runner/src/lib.rs 
b/go-runner/src/lib.rs index df84c4d2..60ba8161 100644 --- a/go-runner/src/lib.rs +++ b/go-runner/src/lib.rs @@ -27,7 +27,10 @@ pub fn run_benchmarks(project_dir: &Path, cli: &crate::cli::Cli) -> anyhow::Resu let mut bench_name_to_path = HashMap::new(); for package in &packages { for benchmark in &package.benchmarks { - bench_name_to_path.insert(benchmark.name.clone(), benchmark.file_path.clone()); + // Create absolute path and immediately convert to git-relative path + let abs_path = package.module.dir.join(&benchmark.file_path); + let git_relative_path = crate::utils::get_git_relative_path(&abs_path); + bench_name_to_path.insert(benchmark.name.clone(), git_relative_path); } } diff --git a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap new file mode 100644 index 00000000..fb6cacd1 --- /dev/null +++ b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap @@ -0,0 +1,203 @@ +--- +source: go-runner/src/integration_tests.rs +expression: content +--- +{ + "creator": { + "name": "codspeed-go", + "version": "0.1.0", + "pid": "[pid]" + }, + "instrument": { + "type": "walltime" + }, + "benchmarks": [ + { + "name": "BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", + "uri": "example/fib_test.go::BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkFibonacci20_Loop", + "uri": "example/fib_test.go::BenchmarkFibonacci20_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkFibonacci20_bN", + "uri": "example/fib_test.go::BenchmarkFibonacci20_bN", + "config": { + "warmup_time_ns": null, 
+ "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100ns", + "uri": "example/sleep_test.go::BenchmarkSleep100ns", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100ns_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep100ns_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100us", + "uri": "example/sleep_test.go::BenchmarkSleep100us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep100us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10ms", + "uri": "example/sleep_test.go::BenchmarkSleep10ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep10ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10us", + "uri": "example/sleep_test.go::BenchmarkSleep10us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep10us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + 
"stats": "[stats]" + }, + { + "name": "BenchmarkSleep1ms", + "uri": "example/sleep_test.go::BenchmarkSleep1ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep1ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1us", + "uri": "example/sleep_test.go::BenchmarkSleep1us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep1us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep50ms", + "uri": "example/sleep_test.go::BenchmarkSleep50ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep50ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep50ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + } + ] +} diff --git a/go-runner/src/snapshots/go_runner__integration_tests__assert_results_snapshots@example_0.snap b/go-runner/src/snapshots/go_runner__integration_tests__assert_results_snapshots@example_0.snap new file mode 100644 index 00000000..fb6cacd1 --- /dev/null +++ b/go-runner/src/snapshots/go_runner__integration_tests__assert_results_snapshots@example_0.snap @@ -0,0 +1,203 @@ +--- +source: go-runner/src/integration_tests.rs +expression: content +--- +{ + "creator": { + "name": "codspeed-go", + "version": "0.1.0", + "pid": "[pid]" + }, 
+ "instrument": { + "type": "walltime" + }, + "benchmarks": [ + { + "name": "BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", + "uri": "example/fib_test.go::BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkFibonacci20_Loop", + "uri": "example/fib_test.go::BenchmarkFibonacci20_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkFibonacci20_bN", + "uri": "example/fib_test.go::BenchmarkFibonacci20_bN", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100ns", + "uri": "example/sleep_test.go::BenchmarkSleep100ns", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100ns_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep100ns_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100us", + "uri": "example/sleep_test.go::BenchmarkSleep100us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep100us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10ms", + "uri": "example/sleep_test.go::BenchmarkSleep10ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + 
"max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep10ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10us", + "uri": "example/sleep_test.go::BenchmarkSleep10us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep10us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1ms", + "uri": "example/sleep_test.go::BenchmarkSleep1ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep1ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1us", + "uri": "example/sleep_test.go::BenchmarkSleep1us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep1us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep50ms", + "uri": "example/sleep_test.go::BenchmarkSleep50ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep50ms_Loop", + 
"uri": "example/sleep_test.go::BenchmarkSleep50ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + } + ] +} diff --git a/go-runner/src/utils.rs b/go-runner/src/utils.rs index 4ee817c5..2ef3a05d 100644 --- a/go-runner/src/utils.rs +++ b/go-runner/src/utils.rs @@ -1,4 +1,4 @@ -use std::path::Path; +use std::path::{Path, PathBuf}; use std::{fs, io}; pub fn copy_dir_recursively(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { @@ -18,3 +18,36 @@ pub fn copy_dir_recursively(src: impl AsRef, dst: impl AsRef) -> io: } Ok(()) } + +// WARN: Git-related helper functions have been taken from codspeed-rust. Keep in sync! + +fn get_parent_git_repo_path(abs_path: &Path) -> io::Result { + if abs_path.join(".git").exists() { + Ok(abs_path.to_path_buf()) + } else { + get_parent_git_repo_path( + abs_path + .parent() + .ok_or(io::Error::from(io::ErrorKind::NotFound))?, + ) + } +} + +pub fn get_git_relative_path

(abs_path: P) -> PathBuf +where + P: AsRef, +{ + if let Ok(canonicalized_abs_path) = abs_path.as_ref().canonicalize() { + // `repo_path` is still canonicalized as it is a subpath of `canonicalized_abs_path` + if let Ok(repo_path) = get_parent_git_repo_path(&canonicalized_abs_path) { + canonicalized_abs_path + .strip_prefix(repo_path) + .expect("Repository path is malformed.") + .to_path_buf() + } else { + canonicalized_abs_path + } + } else { + abs_path.as_ref().to_path_buf() + } +} diff --git a/go-runner/testdata/projects/example b/go-runner/testdata/projects/example new file mode 120000 index 00000000..9f480167 --- /dev/null +++ b/go-runner/testdata/projects/example @@ -0,0 +1 @@ +../../../example \ No newline at end of file From 60a4216dd44bcd1a985cadad0904c2fafd703db6 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Mon, 1 Sep 2025 20:33:15 +0200 Subject: [PATCH 15/17] feat: add nested example --- example/very/nested/module/example_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 example/very/nested/module/example_test.go diff --git a/example/very/nested/module/example_test.go b/example/very/nested/module/example_test.go new file mode 100644 index 00000000..1bee739a --- /dev/null +++ b/example/very/nested/module/example_test.go @@ -0,0 +1,9 @@ +package example + +import "testing" + +func BenchmarkExample(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = 42 + } +} From d0ccd5a3e2e69102fa46ed1f7d1370690a6fa6a9 Mon Sep 17 00:00:00 2001 From: not-matthias Date: Mon, 1 Sep 2025 20:40:02 +0200 Subject: [PATCH 16/17] feat: resolve git-relative path inside benchmark --- example-codspeed/go-runner.metadata | 4 + go-runner/src/builder/templater.rs | 29 +++ go-runner/src/lib.rs | 30 +-- go-runner/src/results/raw_result.rs | 41 ++-- ...s__assert_results_snapshots@example_0.snap | 180 +--------------- ...s__assert_results_snapshots@example_1.snap | 203 ++++++++++++++++++ ...s__assert_results_snapshots@zerolog_1.snap | 14 +- go-runner/src/utils.rs | 
2 - testing/testing/benchmark.go | 47 +++- testing/testing/codspeed.go | 68 ++++++ 10 files changed, 371 insertions(+), 247 deletions(-) create mode 100644 example-codspeed/go-runner.metadata create mode 100644 go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_1.snap create mode 100644 testing/testing/codspeed.go diff --git a/example-codspeed/go-runner.metadata b/example-codspeed/go-runner.metadata new file mode 100644 index 00000000..a192f5fa --- /dev/null +++ b/example-codspeed/go-runner.metadata @@ -0,0 +1,4 @@ +{ + "profile_folder": "/tmp", + "relative_package_path": "codspeed-go" +} diff --git a/go-runner/src/builder/templater.rs b/go-runner/src/builder/templater.rs index 0fb96219..8c29ef2a 100644 --- a/go-runner/src/builder/templater.rs +++ b/go-runner/src/builder/templater.rs @@ -9,6 +9,12 @@ use crate::builder::{BenchmarkPackage, GoBenchmark}; use crate::utils; use crate::{builder::patcher, prelude::*}; +#[derive(Debug, Serialize)] +struct GoRunnerMetadata { + profile_folder: String, + relative_package_path: String, +} + #[derive(Debug, Serialize, Deserialize)] struct TemplateData { benchmarks: Vec, @@ -21,6 +27,29 @@ pub fn run(package: &BenchmarkPackage) -> anyhow::Result<(TempDir, PathBuf)> { std::fs::create_dir_all(&target_dir).context("Failed to create target directory")?; utils::copy_dir_recursively(&package.module.dir, &target_dir)?; + // Create a new go-runner.metadata file in the root of the project + // + // The package path will be prepended to the URI. The benchmark will + // find the path relative to the root of the `target_dir`. + // + // This is needed because we could execute a Go project that is a sub-folder + // within a Git repository, then we won't copy the .git folder. Therefore, we + // have to resolve the .git relative path in go-runner and then combine it. 
+ let relative_package_path = utils::get_git_relative_path(&package.dir) + .to_string_lossy() + .into(); + debug!("Relative package path: {relative_package_path}"); + + let metadata = GoRunnerMetadata { + profile_folder: std::env::var("CODSPEED_PROFILE_FOLDER").unwrap_or("/tmp".into()), + relative_package_path, + }; + fs::write( + target_dir.path().join("go-runner.metadata"), + serde_json::to_string_pretty(&metadata)?, + ) + .context("Failed to write go-runner.metadata file")?; + // Get files that need to be renamed first let files = package .test_go_files diff --git a/go-runner/src/lib.rs b/go-runner/src/lib.rs index 60ba8161..5a10e158 100644 --- a/go-runner/src/lib.rs +++ b/go-runner/src/lib.rs @@ -24,22 +24,15 @@ pub fn run_benchmarks(project_dir: &Path, cli: &crate::cli::Cli) -> anyhow::Resu let packages = BenchmarkPackage::from_project(project_dir, &cli.packages)?; info!("Discovered {} packages", packages.len()); - let mut bench_name_to_path = HashMap::new(); + let total_benchmarks: usize = packages.iter().map(|p| p.benchmarks.len()).sum(); + info!("Total benchmarks discovered: {total_benchmarks}"); + for package in &packages { for benchmark in &package.benchmarks { - // Create absolute path and immediately convert to git-relative path - let abs_path = package.module.dir.join(&benchmark.file_path); - let git_relative_path = crate::utils::get_git_relative_path(&abs_path); - bench_name_to_path.insert(benchmark.name.clone(), git_relative_path); + info!("Found {:30} in {:?}", benchmark.name, benchmark.file_path); } } - let total_benchmarks: usize = packages.iter().map(|p| p.benchmarks.len()).sum(); - info!("Total benchmarks discovered: {total_benchmarks}"); - for (name, path) in &bench_name_to_path { - info!("Found {name:30} in {path:?}"); - } - // 2. 
Generate codspeed runners, build binaries, and execute them for package in &packages { info!("Generating custom runner for package: {}", package.name); @@ -55,13 +48,13 @@ pub fn run_benchmarks(project_dir: &Path, cli: &crate::cli::Cli) -> anyhow::Resu } // 3. Collect the results - collect_walltime_results(bench_name_to_path)?; + collect_walltime_results()?; Ok(()) } // TODO: This should be merged with codspeed-rust/codspeed/walltime_results.rs -fn collect_walltime_results(bench_name_to_path: HashMap) -> anyhow::Result<()> { +fn collect_walltime_results() -> anyhow::Result<()> { let profile_dir = std::env::var("CODSPEED_PROFILE_FOLDER") .context("CODSPEED_PROFILE_FOLDER env var not set")?; let profile_dir = PathBuf::from(&profile_dir); @@ -71,19 +64,10 @@ fn collect_walltime_results(bench_name_to_path: HashMap) -> any let mut benchmarks_by_pid: HashMap> = HashMap::new(); for raw in raw_results { - // We only parse the `func Benchmark*` name which is the first part of the URI - let func_name = raw - .benchmark_name - .split("::") - .next() - .unwrap_or(&raw.benchmark_name); - let file_path = bench_name_to_path - .get(func_name) - .map(|p| p.to_string_lossy().to_string()); benchmarks_by_pid .entry(raw.pid) .or_default() - .push(raw.into_walltime_benchmark(file_path)); + .push(raw.into_walltime_benchmark()); } for (pid, walltime_benchmarks) in benchmarks_by_pid { diff --git a/go-runner/src/results/raw_result.rs b/go-runner/src/results/raw_result.rs index de940382..731c7246 100644 --- a/go-runner/src/results/raw_result.rs +++ b/go-runner/src/results/raw_result.rs @@ -7,7 +7,8 @@ use crate::results::walltime_results::WalltimeBenchmark; // WARN: Keep in sync with Golang "testing" fork (benchmark.go) #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RawResult { - pub benchmark_name: String, + pub name: String, + pub uri: String, pub pid: u32, pub codspeed_time_per_round_ns: Vec, @@ -32,12 +33,7 @@ impl RawResult { .collect()) } - pub fn 
into_walltime_benchmark(self, file_path: Option) -> WalltimeBenchmark { - let name = self.benchmark_name; - - let file = file_path.as_deref().unwrap_or("unknown"); - let uri = format!("{file}::{name}"); - + pub fn into_walltime_benchmark(self) -> WalltimeBenchmark { let times_per_round_ns = self .codspeed_time_per_round_ns .iter() @@ -52,7 +48,13 @@ impl RawResult { .collect() }; - WalltimeBenchmark::from_runtime_data(name, uri, iters_per_round, times_per_round_ns, None) + WalltimeBenchmark::from_runtime_data( + self.name, + self.uri, + iters_per_round, + times_per_round_ns, + None, + ) } } @@ -63,33 +65,16 @@ mod tests { #[test] fn test_raw_result_deserialization() { let json_data = r#"{ - "benchmark_name": "BenchmarkFibonacci20-16", + "name": "BenchmarkFibonacci20-16", + "uri": "pkg/foo/fib_test.go::BenchmarkFibonacci20-16", "pid": 777767, "codspeed_time_per_round_ns": [1000, 2000, 3000] }"#; let result: RawResult = serde_json::from_str(json_data).unwrap(); - assert_eq!(result.benchmark_name, "BenchmarkFibonacci20-16"); + assert_eq!(result.name, "BenchmarkFibonacci20-16"); assert_eq!(result.pid, 777767); assert_eq!(result.codspeed_time_per_round_ns.len(), 3); assert_eq!(result.codspeed_iters_per_round.len(), 0); // Default: 1 per round } - - #[test] - fn test_into_walltime_benchmark_with_file_path() { - let raw_result = RawResult { - benchmark_name: "BenchmarkFibonacci20-16".to_string(), - pid: 777767, - codspeed_time_per_round_ns: vec![1000, 2000, 3000], - codspeed_iters_per_round: vec![], - }; - - // Test with file path - should not panic and create successfully - let _walltime_bench = raw_result - .clone() - .into_walltime_benchmark(Some("pkg/foo/fib_test.go".to_string())); - - // Test without file path (should default to TODO) - should not panic and create successfully - let _walltime_bench_no_path = raw_result.into_walltime_benchmark(None); - } } diff --git 
a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap index fb6cacd1..8fb1cf8f 100644 --- a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap +++ b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_0.snap @@ -13,184 +13,8 @@ expression: content }, "benchmarks": [ { - "name": "BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", - "uri": "example/fib_test.go::BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkFibonacci20_Loop", - "uri": "example/fib_test.go::BenchmarkFibonacci20_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkFibonacci20_bN", - "uri": "example/fib_test.go::BenchmarkFibonacci20_bN", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep100ns", - "uri": "example/sleep_test.go::BenchmarkSleep100ns", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep100ns_Loop", - "uri": "example/sleep_test.go::BenchmarkSleep100ns_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep100us", - "uri": "example/sleep_test.go::BenchmarkSleep100us", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - 
"stats": "[stats]" - }, - { - "name": "BenchmarkSleep100us_Loop", - "uri": "example/sleep_test.go::BenchmarkSleep100us_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep10ms", - "uri": "example/sleep_test.go::BenchmarkSleep10ms", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep10ms_Loop", - "uri": "example/sleep_test.go::BenchmarkSleep10ms_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep10us", - "uri": "example/sleep_test.go::BenchmarkSleep10us", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep10us_Loop", - "uri": "example/sleep_test.go::BenchmarkSleep10us_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep1ms", - "uri": "example/sleep_test.go::BenchmarkSleep1ms", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep1ms_Loop", - "uri": "example/sleep_test.go::BenchmarkSleep1ms_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep1us", - "uri": "example/sleep_test.go::BenchmarkSleep1us", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep1us_Loop", - "uri": 
"example/sleep_test.go::BenchmarkSleep1us_Loop", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep50ms", - "uri": "example/sleep_test.go::BenchmarkSleep50ms", - "config": { - "warmup_time_ns": null, - "min_round_time_ns": null, - "max_time_ns": null, - "max_rounds": null - }, - "stats": "[stats]" - }, - { - "name": "BenchmarkSleep50ms_Loop", - "uri": "example/sleep_test.go::BenchmarkSleep50ms_Loop", + "name": "BenchmarkExample", + "uri": "example/very/nested/module/example_test.go::BenchmarkExample", "config": { "warmup_time_ns": null, "min_round_time_ns": null, diff --git a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_1.snap b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_1.snap new file mode 100644 index 00000000..fb6cacd1 --- /dev/null +++ b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@example_1.snap @@ -0,0 +1,203 @@ +--- +source: go-runner/src/integration_tests.rs +expression: content +--- +{ + "creator": { + "name": "codspeed-go", + "version": "0.1.0", + "pid": "[pid]" + }, + "instrument": { + "type": "walltime" + }, + "benchmarks": [ + { + "name": "BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", + "uri": "example/fib_test.go::BenchmarkFibonacci10::fibonacci(10)::fibonacci(10)", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkFibonacci20_Loop", + "uri": "example/fib_test.go::BenchmarkFibonacci20_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkFibonacci20_bN", + "uri": "example/fib_test.go::BenchmarkFibonacci20_bN", + "config": { + "warmup_time_ns": 
null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100ns", + "uri": "example/sleep_test.go::BenchmarkSleep100ns", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100ns_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep100ns_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100us", + "uri": "example/sleep_test.go::BenchmarkSleep100us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep100us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep100us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10ms", + "uri": "example/sleep_test.go::BenchmarkSleep10ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep10ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10us", + "uri": "example/sleep_test.go::BenchmarkSleep10us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep10us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep10us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null 
+ }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1ms", + "uri": "example/sleep_test.go::BenchmarkSleep1ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep1ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1us", + "uri": "example/sleep_test.go::BenchmarkSleep1us", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep1us_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep1us_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep50ms", + "uri": "example/sleep_test.go::BenchmarkSleep50ms", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + }, + { + "name": "BenchmarkSleep50ms_Loop", + "uri": "example/sleep_test.go::BenchmarkSleep50ms_Loop", + "config": { + "warmup_time_ns": null, + "min_round_time_ns": null, + "max_time_ns": null, + "max_rounds": null + }, + "stats": "[stats]" + } + ] +} diff --git a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@zerolog_1.snap b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@zerolog_1.snap index 1d486fc1..aeb6aa35 100644 --- a/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@zerolog_1.snap +++ b/go-runner/src/snapshots/codspeed_go_runner__integration_tests__assert_results_snapshots@zerolog_1.snap @@ -146,7 +146,7 @@ expression: content }, { "name": 
"BenchmarkAppendString::EncodingFirst", - "uri": "internal/json/string_test.go::BenchmarkAppendString::EncodingFirst", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::EncodingFirst", "config": { "warmup_time_ns": null, "min_round_time_ns": null, @@ -157,7 +157,7 @@ expression: content }, { "name": "BenchmarkAppendString::EncodingLast", - "uri": "internal/json/string_test.go::BenchmarkAppendString::EncodingLast", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::EncodingLast", "config": { "warmup_time_ns": null, "min_round_time_ns": null, @@ -168,7 +168,7 @@ expression: content }, { "name": "BenchmarkAppendString::EncodingMiddle", - "uri": "internal/json/string_test.go::BenchmarkAppendString::EncodingMiddle", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::EncodingMiddle", "config": { "warmup_time_ns": null, "min_round_time_ns": null, @@ -179,7 +179,7 @@ expression: content }, { "name": "BenchmarkAppendString::MultiBytesFirst", - "uri": "internal/json/string_test.go::BenchmarkAppendString::MultiBytesFirst", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::MultiBytesFirst", "config": { "warmup_time_ns": null, "min_round_time_ns": null, @@ -190,7 +190,7 @@ expression: content }, { "name": "BenchmarkAppendString::MultiBytesLast", - "uri": "internal/json/string_test.go::BenchmarkAppendString::MultiBytesLast", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::MultiBytesLast", "config": { "warmup_time_ns": null, "min_round_time_ns": null, @@ -201,7 +201,7 @@ expression: content }, { "name": "BenchmarkAppendString::MultiBytesMiddle", - "uri": "internal/json/string_test.go::BenchmarkAppendString::MultiBytesMiddle", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::MultiBytesMiddle", "config": { "warmup_time_ns": null, "min_round_time_ns": null, @@ -212,7 +212,7 @@ expression: content }, { "name": "BenchmarkAppendString::NoEncoding", - "uri": 
"internal/json/string_test.go::BenchmarkAppendString::NoEncoding", + "uri": "internal/cbor/string_test.go::BenchmarkAppendString::NoEncoding", "config": { "warmup_time_ns": null, "min_round_time_ns": null, diff --git a/go-runner/src/utils.rs b/go-runner/src/utils.rs index 2ef3a05d..2a5be482 100644 --- a/go-runner/src/utils.rs +++ b/go-runner/src/utils.rs @@ -19,8 +19,6 @@ pub fn copy_dir_recursively(src: impl AsRef, dst: impl AsRef) -> io: Ok(()) } -// WARN: Git-related helper functions have been taken from codspeed-rust. Keep in sync! - fn get_parent_git_repo_path(abs_path: &Path) -> io::Result { if abs_path.join(".git").exists() { Ok(abs_path.to_path_buf()) diff --git a/testing/testing/benchmark.go b/testing/testing/benchmark.go index 2abb06d8..be424462 100644 --- a/testing/testing/benchmark.go +++ b/testing/testing/benchmark.go @@ -15,6 +15,7 @@ import ( "math" "os" "path/filepath" + "reflect" "runtime" "slices" "strconv" @@ -834,12 +835,37 @@ func (s *benchState) processBench(b *B) { // ############################################################################################ // START CODSPEED type RawResults struct { - BenchmarkName string `json:"benchmark_name"` + Name string `json:"name"` + Uri string `json:"uri"` Pid int `json:"pid"` CodspeedTimePerRoundNs []time.Duration `json:"codspeed_time_per_round_ns"` CodspeedItersPerRound []int64 `json:"codspeed_iters_per_round"` } + // Find the filename of the benchmark file + var benchFile string + if b.benchFunc != nil { + pc := reflect.ValueOf(b.benchFunc).Pointer() + fn := runtime.FuncForPC(pc) + if fn == nil { + continue + } + + file, _ := fn.FileLine(pc) + if strings.HasSuffix(file, "_codspeed.go") { + benchFile = file + } + } + + if benchFile == "" { + panic("Could not determine benchmark file name") + } + + relativeBenchFile := getGitRelativePath(benchFile) + if strings.HasSuffix(relativeBenchFile, "_codspeed.go") { + relativeBenchFile = strings.TrimSuffix(relativeBenchFile, "_codspeed.go") + "_test.go" + 
} + // Build custom bench name with :: separator var nameParts []string current := &b.common @@ -858,20 +884,23 @@ func (s *benchState) processBench(b *B) { } current = current.parent } - customBenchName := strings.Join(nameParts, "::") + benchName = strings.Join(nameParts, "::") + benchUri := fmt.Sprintf("%s::%s", relativeBenchFile, benchName) rawResults := RawResults{ - BenchmarkName: customBenchName, + Name: benchName, + Uri: benchUri, Pid: os.Getpid(), CodspeedTimePerRoundNs: r.CodspeedTimePerRoundNs, CodspeedItersPerRound: r.CodspeedItersPerRound, } - codspeedProfileFolder := os.Getenv("CODSPEED_PROFILE_FOLDER") - if codspeedProfileFolder == "" { - panic("CODSPEED_PROFILE_FOLDER environment variable is not set") + goRunnerMetadata, err := findGoRunnerMetadata() + if err != nil { + panic(fmt.Sprintf("failed to get go runner metadata: %v", err)) } - if err := os.MkdirAll(filepath.Join(codspeedProfileFolder, "raw_results"), 0755); err != nil { + + if err := os.MkdirAll(filepath.Join(goRunnerMetadata.ProfileFolder, "raw_results"), 0755); err != nil { fmt.Fprintf(os.Stderr, "failed to create raw results directory: %v\n", err) continue } @@ -881,7 +910,7 @@ func (s *benchState) processBench(b *B) { fmt.Fprintf(os.Stderr, "failed to generate random filename: %v\n", err) continue } - rawResultsFile := filepath.Join(codspeedProfileFolder, "raw_results", fmt.Sprintf("%s.json", hex.EncodeToString(randomBytes))) + rawResultsFile := filepath.Join(goRunnerMetadata.ProfileFolder, "raw_results", fmt.Sprintf("%s.json", hex.EncodeToString(randomBytes))) file, err := os.Create(rawResultsFile) if err != nil { fmt.Fprintf(os.Stderr, "failed to create raw results file: %v\n", err) @@ -902,7 +931,7 @@ func (s *benchState) processBench(b *B) { defer file.Close() // Send pid and executed benchmark to the runner - b.codspeed.instrument_hooks.SetExecutedBenchmark(uint32(os.Getpid()), customBenchName) + b.codspeed.instrument_hooks.SetExecutedBenchmark(uint32(os.Getpid()), benchUri) // 
END CODSPEED // ############################################################################################ diff --git a/testing/testing/codspeed.go b/testing/testing/codspeed.go new file mode 100644 index 00000000..40b4162a --- /dev/null +++ b/testing/testing/codspeed.go @@ -0,0 +1,68 @@ +package testing + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" +) + +type GoRunnerMetadata struct { + ProfileFolder string `json:"profile_folder"` + RelativePackagePath string `json:"relative_package_path"` +} + +func findGoRunnerMetadata() (*GoRunnerMetadata, error) { + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + + // Search up the directory tree for go-runner.metadata + currentDir := cwd + for { + metadataPath := filepath.Join(currentDir, "go-runner.metadata") + data, err := os.ReadFile(metadataPath) + if err == nil { + var metadata GoRunnerMetadata + err = json.Unmarshal(data, &metadata) + if err != nil { + return nil, err + } + return &metadata, nil + } + + parentDir := filepath.Dir(currentDir) + if parentDir == currentDir { + // Reached the root directory + break + } + currentDir = parentDir + } + + return nil, os.ErrNotExist +} + +func getGitRelativePath(absPath string) string { + canonicalizedAbsPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + panic(fmt.Sprintf("failed to evaluate symlinks for path %s: %v", absPath, err)) + } + + cwd, err := os.Getwd() + if err != nil { + panic(fmt.Sprintf("failed to get current working directory: %v", err)) + } + + cwdRelativePath, err := filepath.Rel(cwd, canonicalizedAbsPath) + if err != nil { + panic(fmt.Sprintf("failed to compute relative path from %s to %s: %v", cwd, canonicalizedAbsPath, err)) + } + + goRunnerMetadata, err := findGoRunnerMetadata() + if err != nil { + panic(fmt.Sprintf("failed to find go-runner metadata: %v", err)) + } + + return filepath.Join(goRunnerMetadata.RelativePackagePath, cwdRelativePath) +} From aa4b9f5cbcab82e22d5b1067b963c81aa6c7a57e Mon Sep 17 
00:00:00 2001 From: not-matthias Date: Mon, 1 Sep 2025 20:52:51 +0200 Subject: [PATCH 17/17] fix: remove profile env var mutex in test --- go-runner/src/builder/templater.rs | 7 +++++-- go-runner/src/integration_tests.rs | 8 +------- go-runner/src/lib.rs | 24 ++++++++++-------------- go-runner/src/main.rs | 3 ++- go-runner/tests/utils.rs | 13 ++----------- 5 files changed, 20 insertions(+), 35 deletions(-) diff --git a/go-runner/src/builder/templater.rs b/go-runner/src/builder/templater.rs index 8c29ef2a..9122f120 100644 --- a/go-runner/src/builder/templater.rs +++ b/go-runner/src/builder/templater.rs @@ -21,7 +21,10 @@ struct TemplateData { module_name: String, } -pub fn run(package: &BenchmarkPackage) -> anyhow::Result<(TempDir, PathBuf)> { +pub fn run<P: AsRef<Path>>( + package: &BenchmarkPackage, + profile_dir: P, +) -> anyhow::Result<(TempDir, PathBuf)> { // 1. Copy the whole module to a build directory let target_dir = TempDir::new()?; std::fs::create_dir_all(&target_dir).context("Failed to create target directory")?; @@ -41,7 +44,7 @@ pub fn run(package: &BenchmarkPackage) -> anyhow::Result<(TempDir, PathBuf)> { debug!("Relative package path: {relative_package_path}"); let metadata = GoRunnerMetadata { - profile_folder: std::env::var("CODSPEED_PROFILE_FOLDER").unwrap_or("/tmp".into()), + profile_folder: profile_dir.as_ref().to_string_lossy().into(), relative_package_path, }; fs::write( diff --git a/go-runner/src/integration_tests.rs b/go-runner/src/integration_tests.rs index 5f0f5f5b..30254935 100644 --- a/go-runner/src/integration_tests.rs +++ b/go-runner/src/integration_tests.rs @@ -1,7 +1,6 @@ use itertools::Itertools; use rstest::rstest; use std::path::{Path, PathBuf}; -use std::sync::Mutex; use tempfile::TempDir; use crate::results::walltime_results::WalltimeResults; @@ -66,18 +65,13 @@ fn test_build_and_run(#[case] project_name: &str) { .join("testdata/projects") .join(project_name); - // Mutex to prevent concurrent tests from interfering with CODSPEED_PROFILE_FOLDER 
env var - static ENV_MUTEX: Mutex<()> = Mutex::new(()); - let _env_guard = ENV_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); - let temp_dir = TempDir::new().unwrap(); let profile_dir = temp_dir.path().join("profile"); - unsafe { std::env::set_var("CODSPEED_PROFILE_FOLDER", &profile_dir) }; let cli = crate::cli::Cli { benchtime: "1x".into(), ..Default::default() }; - if let Err(error) = crate::run_benchmarks(project_dir.as_path(), &cli) { + if let Err(error) = crate::run_benchmarks(&profile_dir, project_dir.as_path(), &cli) { panic!("Benchmarks couldn't run: {error}"); } diff --git a/go-runner/src/lib.rs b/go-runner/src/lib.rs index 5a10e158..7d4cb0bc 100644 --- a/go-runner/src/lib.rs +++ b/go-runner/src/lib.rs @@ -1,8 +1,5 @@ use crate::{builder::BenchmarkPackage, prelude::*}; -use std::{ - collections::HashMap, - path::{Path, PathBuf}, -}; +use std::{collections::HashMap, path::Path}; pub mod builder; pub mod cli; @@ -15,9 +12,11 @@ pub(crate) mod utils; mod integration_tests; /// Builds and runs the specified Go project benchmarks, writing results to the .codspeed folder. -pub fn run_benchmarks(project_dir: &Path, cli: &crate::cli::Cli) -> anyhow::Result<()> { - let profile_dir = std::env::var("CODSPEED_PROFILE_FOLDER") - .context("CODSPEED_PROFILE_FOLDER env var not set")?; +pub fn run_benchmarks<P: AsRef<Path>>( + profile_dir: P, + project_dir: &Path, + cli: &crate::cli::Cli, +) -> anyhow::Result<()> { std::fs::remove_dir_all(&profile_dir).ok(); // 1. Build phase - Benchmark and package discovery @@ -36,7 +35,7 @@ pub fn run_benchmarks(project_dir: &Path, cli: &crate::cli::Cli) -> anyhow::Resu // 2. 
Generate codspeed runners, build binaries, and execute them for package in &packages { info!("Generating custom runner for package: {}", package.name); - let (_target_dir, runner_path) = builder::templater::run(package)?; + let (_target_dir, runner_path) = builder::templater::run(package, &profile_dir)?; info!("Building binary for package: {}", package.name); let binary_path = builder::build_binary(&runner_path)?; @@ -48,17 +47,14 @@ } // 3. Collect the results - collect_walltime_results()?; + collect_walltime_results(profile_dir.as_ref())?; Ok(()) } // TODO: This should be merged with codspeed-rust/codspeed/walltime_results.rs -fn collect_walltime_results() -> anyhow::Result<()> { - let profile_dir = std::env::var("CODSPEED_PROFILE_FOLDER") - .context("CODSPEED_PROFILE_FOLDER env var not set")?; - let profile_dir = PathBuf::from(&profile_dir); - let raw_results = results::raw_result::RawResult::parse_folder(&profile_dir)?; +fn collect_walltime_results(profile_dir: &Path) -> anyhow::Result<()> { + let raw_results = results::raw_result::RawResult::parse_folder(profile_dir)?; info!("Parsed {} raw results", raw_results.len()); let mut benchmarks_by_pid: HashMap<u32, Vec<_>> = diff --git a/go-runner/src/main.rs b/go-runner/src/main.rs index bd78b8d5..9a0b08cd 100644 --- a/go-runner/src/main.rs +++ b/go-runner/src/main.rs @@ -9,7 +9,8 @@ fn main() -> anyhow::Result<()> { .init(); let cli = Cli::parse(); - codspeed_go_runner::run_benchmarks(Path::new("."), &cli)?; + let profile_dir = std::env::var("CODSPEED_PROFILE_FOLDER").unwrap_or("/tmp".into()); + codspeed_go_runner::run_benchmarks(profile_dir, Path::new("."), &cli)?; Ok(()) } diff --git a/go-runner/tests/utils.rs b/go-runner/tests/utils.rs index 8ab713d1..4794b96b 100644 --- a/go-runner/tests/utils.rs +++ b/go-runner/tests/utils.rs @@ -1,19 +1,10 @@ use codspeed_go_runner::{builder, builder::BenchmarkPackage, cli::Cli, runner}; use std::path::Path; 
-use std::sync::Mutex; -use tempfile::TempDir; /// Helper function to run a single package with arguments pub fn run_package_with_args(package: &BenchmarkPackage, args: &[&str]) -> anyhow::Result<String> { - // Mutex to prevent concurrent tests from interfering with CODSPEED_PROFILE_FOLDER env var - static ENV_MUTEX: Mutex<()> = Mutex::new(()); - let _env_guard = ENV_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); - - let temp_dir = TempDir::new()?; - let profile_dir = temp_dir.path().join("profile"); - unsafe { std::env::set_var("CODSPEED_PROFILE_FOLDER", &profile_dir) }; - - let (_dir, runner_path) = builder::templater::run(package)?; + let profile_dir = tempfile::TempDir::new()?; + let (_dir, runner_path) = builder::templater::run(package, profile_dir.as_ref())?; let binary_path = builder::build_binary(&runner_path)?; runner::run_with_stdout(&binary_path, args) }