diff --git a/.gitignore b/.gitignore index 6bb476b5f..7c8c8abc2 100644 --- a/.gitignore +++ b/.gitignore @@ -75,3 +75,6 @@ TAGS /.cache/ /compile_commands.json +# 'nix build' resulting symlink +result + diff --git a/README.md b/README.md index 969e60bf4..ad146dcc7 100644 --- a/README.md +++ b/README.md @@ -138,7 +138,7 @@ fn void main() ### Current status -The current stable version of the compiler is **version 0.6.3**. +The current stable version of the compiler is **version 0.6.4**. The upcoming 0.6.x releases will focus on expanding the standard library. Follow the issues [here](https://github.com/c3lang/c3c/issues). diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000..76e12ad98 --- /dev/null +++ b/flake.lock @@ -0,0 +1,61 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1726560853, + "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1730958623, + "narHash": "sha256-JwQZIGSYnRNOgDDoIgqKITrPVil+RMWHsZH1eE1VGN0=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "85f7e662eda4fa3a995556527c87b2524b691933", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000..1d22bc7c9 --- /dev/null +++ b/flake.nix @@ -0,0 +1,35 @@ +{ + description = "C3 compiler flake"; + + inputs = { + nixpkgs.url = "github:nixos/nixpkgs?ref=nixpkgs-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = { self, ... } @ inputs: inputs.flake-utils.lib.eachDefaultSystem + (system: + let pkgs = import inputs.nixpkgs { inherit system; }; in + { + packages = { + default = self.packages.${system}.c3c; + + c3c = pkgs.callPackage ./nix/default.nix {}; + + c3c-debug = pkgs.callPackage ./nix/default.nix { + debug = true; + }; + + c3c-nochecks = pkgs.callPackage ./nix/default.nix { + debug = true; + checks = false; + }; + }; + + devShells = { + default = pkgs.callPackage ./nix/shell.nix { + c3c = self.packages.${system}.c3c-nochecks; + }; + }; + } + ); +} diff --git a/nix/default.nix b/nix/default.nix new file mode 100644 index 000000000..ae910343f --- /dev/null +++ b/nix/default.nix @@ -0,0 +1,85 @@ +{ + lib, + llvmPackages, + cmake, + python3, + curl, + libxml2, + libffi, + xar, + debug ? false, + checks ? 
true, +}: let + inherit (builtins) baseNameOf toString readFile elemAt; + inherit (lib.sources) cleanSourceWith cleanSource; + inherit (lib.lists) findFirst; + inherit (lib.asserts) assertMsg; + inherit (lib.strings) hasInfix hasSuffix splitString removeSuffix removePrefix optionalString; +in +llvmPackages.stdenv.mkDerivation (finalAttrs: { + pname = "c3c${optionalString debug "-debug"}"; + version = let + findLine = findFirst (x: hasInfix "COMPILER_VERSION" x) "none"; + foundLine = findLine ( splitString "\n" ( readFile ../src/version.h ) ); + version = removeSuffix "\"" ( removePrefix "\"" ( elemAt ( splitString " " foundLine ) 2 ) ); + in + assert assertMsg (foundLine != "none") "No COMPILER_VERSION substring was found in version.h"; + version; + + src = cleanSourceWith { + filter = _path: _type: !(hasSuffix ".nix" (baseNameOf(toString _path))); + src = cleanSource ../.; + }; + + postPatch = '' + substituteInPlace CMakeLists.txt \ + --replace-fail "\''${LLVM_LIBRARY_DIRS}" "${llvmPackages.lld.lib}/lib ${llvmPackages.llvm.lib}/lib" + ''; + + cmakeBuildType = if debug then "Debug" else "Release"; + + cmakeFlags = [ + "-DC3_ENABLE_CLANGD_LSP=${if debug then "ON" else "OFF"}" + ]; + + nativeBuildInputs = [ cmake ]; + + postBuild = optionalString debug '' + mkdir $out + substituteInPlace compile_commands.json \ + --replace "/build/source/" "$src/" + cp compile_commands.json $out/compile_commands.json + ''; + + buildInputs = [ + llvmPackages.llvm + llvmPackages.lld + curl + libxml2 + libffi + ] ++ lib.optionals llvmPackages.stdenv.hostPlatform.isDarwin [ xar ]; + + nativeCheckInputs = [ python3 ]; + + doCheck = llvmPackages.stdenv.system == "x86_64-linux" && checks; + + checkPhase = '' + runHook preCheck + ( cd ../resources/testproject; ../../build/c3c build ) + ( cd ../test; python src/tester.py ../build/c3c test_suite ) + runHook postCheck + ''; + + meta = with lib; { + description = "Compiler for the C3 language"; + homepage = "https://github.com/c3lang/c3c"; + license = licenses.lgpl3Only; + maintainers = with maintainers; [ + luc65r + anas + ]; + platforms = platforms.all; + mainProgram = "c3c"; + }; +}) + diff --git a/nix/shell.nix b/nix/shell.nix new file mode 100644 index 000000000..63f3695d3 --- /dev/null +++ b/nix/shell.nix @@ -0,0 +1,20 @@ +{ + mkShell, + clang-tools, + c3c, +}: + +mkShell { + inputsFrom = [ + c3c + ]; + + packages = [ + clang-tools + c3c + ]; + + shellHook = '' + ln -sf ${c3c}/compile_commands.json compile_commands.json + ''; +} diff --git a/releasenotes.md b/releasenotes.md index 1db95743a..6057f519f 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,5 +1,16 @@ # C3C Release Notes +## 0.6.5 Change list + +### Changes / improvements +- Allow splat in initializers. +- Init command will now add `test-sources` to `project.json` #1520 + +### Fixes +- Fix bug where `a > 0 ? f() : g()` could cause a compiler crash if both returned `void!`. 
+ +### Stdlib changes + ## 0.6.4 Change list ### Changes / improvements diff --git a/src/build/build_options.c b/src/build/build_options.c index 7cda98ea4..4c1000ec6 100644 --- a/src/build/build_options.c +++ b/src/build/build_options.c @@ -1202,7 +1202,7 @@ static inline bool at_end() static inline const char *next_arg() { - assert(!at_end()); + ASSERT0(!at_end()); current_arg = args[++arg_index]; return current_arg; } diff --git a/src/build/project.c b/src/build/project.c index 5de759996..ceacbedb6 100644 --- a/src/build/project.c +++ b/src/build/project.c @@ -44,6 +44,7 @@ const char *project_default_keys[][2] = { {"single-module", "Compile all modules together, enables more inlining."}, {"soft-float", "Output soft-float functions."}, {"sources", "Paths to project sources for all targets."}, + {"test-sources", "Paths to project test sources for all targets."}, {"strip-unused", "Strip unused code and globals from the output. (default: true)"}, {"symtab", "Sets the preferred symtab size."}, {"target", "Compile for a particular architecture + OS target."}, @@ -116,6 +117,8 @@ const char* project_target_keys[][2] = { {"soft-float", "Output soft-float functions."}, {"sources", "Additional paths to project sources for the target."}, {"sources-override", "Paths to project sources for this target, overriding global settings."}, + {"test-sources", "Additional paths to project test sources for the target."}, + {"test-sources-override", "Paths to project test sources for this target, overriding global settings."}, {"strip-unused", "Strip unused code and globals from the output. (default: true)"}, {"symtab", "Sets the preferred symtab size."}, {"target", "Compile for a particular architecture + OS target."}, @@ -424,7 +427,7 @@ static void load_into_build_target(const char *filename, JSONObject *json, const static void project_add_target(const char *filename, Project *project, BuildTarget *default_target, JSONObject *json, const char *name, const char *type, TargetType target_type) { - assert(json->type == J_OBJECT); + ASSERT0(json->type == J_OBJECT); BuildTarget *target = CALLOCS(BuildTarget); *target = *default_target; vec_add(project->targets, target); @@ -444,7 +447,7 @@ static void project_add_target(const char *filename, Project *project, BuildTarg static void project_add_targets(const char *filename, Project *project, JSONObject *project_data) { - assert(project_data->type == J_OBJECT); + ASSERT0(project_data->type == J_OBJECT); BuildTarget default_target = default_build_target; load_into_build_target(filename, project_data, NULL, &default_target); diff --git a/src/build/project_creation.c b/src/build/project_creation.c index ff37d8997..7374d3b99 100644 --- a/src/build/project_creation.c +++ b/src/build/project_creation.c @@ -20,6 +20,8 @@ const char* JSON_EXE = " \"version\": \"0.1.0\",\n" " // Sources compiled for all targets.\n" " \"sources\": [ \"src/**\" ],\n" + " // Test sources compiled for all targets.\n" + " \"test-sources\": [ \"test/**\" ],\n" " // C sources if the project also compiles C sources\n" " // relative to the project file.\n" " // \"c-sources\": [ \"csource/**\" ],\n" @@ -63,6 +65,8 @@ const char* JSON_STATIC = " \"version\": \"0.1.0\",\n" " // Sources compiled for all targets.\n" " \"sources\": [ \"src/**\" ],\n" + " // Test sources compiled for all targets.\n" + " \"test-sources\": [ \"test/**\" ],\n" " // C sources if the project also compiles C sources\n" " // relative to the project file.\n" " // \"c-sources\": [ \"csource/**\" ],\n" @@ -104,6 +108,8 @@ const char* 
JSON_DYNAMIC = " \"version\": \"0.1.0\",\n" " // Sources compiled for all targets.\n" " \"sources\": [ \"src/**\" ],\n" + " // Test sources compiled for all targets.\n" + " \"test-sources\": [ \"test/**\" ],\n" " // C sources if the project also compiles C sources\n" " // relative to the project file.\n" " // \"c-sources\": [ \"csource/**\" ],\n" diff --git a/src/compiler/abi/c_abi.c b/src/compiler/abi/c_abi.c index ac9931d8e..d611df6c1 100644 --- a/src/compiler/abi/c_abi.c +++ b/src/compiler/abi/c_abi.c @@ -61,10 +61,10 @@ bool abi_arg_is_indirect(ABIArgInfo *info) ABIArgInfo *abi_arg_new_indirect_realigned(AlignSize alignment, Type *by_val_type) { - assert(alignment > 0); + ASSERT0(alignment > 0); ABIArgInfo *info = abi_arg_new(ABI_ARG_INDIRECT); info->indirect.alignment = alignment; - assert(info->indirect.alignment); + ASSERT0(info->indirect.alignment); info->attributes.realign = true; info->indirect.type = by_val_type; info->attributes.by_val = true; @@ -77,7 +77,7 @@ ABIArgInfo *abi_arg_new_indirect_by_val(Type *by_val_type) info->indirect.alignment = type_abi_alignment(by_val_type); info->indirect.type = by_val_type; info->attributes.by_val = true; - assert(info->indirect.alignment); + ASSERT0(info->indirect.alignment); return info; } @@ -85,7 +85,7 @@ ABIArgInfo *abi_arg_new_indirect_not_by_val(Type *type) { ABIArgInfo *info = abi_arg_new(ABI_ARG_INDIRECT); info->indirect.alignment = type_abi_alignment(type); - assert(info->indirect.alignment); + ASSERT0(info->indirect.alignment); info->indirect.type = type; info->attributes.by_val = false; return info; @@ -175,7 +175,7 @@ ABIArgInfo *abi_arg_new_direct_coerce_int(void) ABIArgInfo *abi_arg_new_direct_coerce_type(Type *type) { - assert(type); + ASSERT0(type); ABIArgInfo *info = abi_arg_new(ABI_ARG_DIRECT_COERCE); info->direct_coerce_type = type->canonical; return info; @@ -191,7 +191,7 @@ ABIArgInfo *abi_arg_new_direct_struct_expand_i32(uint8_t elements) void c_abi_func_create(FunctionPrototype *proto) { - assert(!proto->is_resolved); + ASSERT0(!proto->is_resolved); proto->is_resolved = true; switch (compiler.platform.abi) { diff --git a/src/compiler/abi/c_abi_aarch64.c b/src/compiler/abi/c_abi_aarch64.c index 6c9ca0153..90c1bf734 100644 --- a/src/compiler/abi/c_abi_aarch64.c +++ b/src/compiler/abi/c_abi_aarch64.c @@ -57,7 +57,7 @@ ABIArgInfo *aarch64_coerce_illegal_vector(Type *type) UNREACHABLE }*/ } - assert(type->type_kind == TYPE_VECTOR); + ASSERT0(type->type_kind == TYPE_VECTOR); TypeSize size = type_size(type); // CLANG: Android promotes char[<2>] to ushort, not uint @@ -107,7 +107,7 @@ ABIArgInfo *aarch64_classify_argument_type(Type *type) unsigned members = 0; if (type_is_homogenous_aggregate(type, &base, &members)) { - assert(members < 128); + ASSERT0(members < 128); if (members > 1) { return abi_arg_new_direct_coerce_type(type_get_array(base, members)); @@ -134,7 +134,7 @@ ABIArgInfo *aarch64_classify_argument_type(Type *type) size = aligned_offset(size, alignment); // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. // For aggregates with 16-byte alignment, we use i128. 
- assert(alignment == 8 || alignment == 16); + ASSERT0(alignment == 8 || alignment == 16); if (alignment == 16) return abi_arg_new_direct_coerce_type(type_u128); ArraySize m = size / alignment; diff --git a/src/compiler/abi/c_abi_riscv.c b/src/compiler/abi/c_abi_riscv.c index 8409f2086..afa0bd3e5 100644 --- a/src/compiler/abi/c_abi_riscv.c +++ b/src/compiler/abi/c_abi_riscv.c @@ -7,13 +7,13 @@ static ABIArgInfo *riscv_coerce_and_expand_fpcc_struct(AbiType field1, unsigned field1_offset, AbiType field2, unsigned field2_offset) { - assert(abi_type_is_type(field1)); + ASSERT0(abi_type_is_type(field1)); if (!abi_type_is_valid(field2)) { return abi_arg_new_direct_coerce_type(field1.type); } - assert(abi_type_is_type(field2)); + ASSERT0(abi_type_is_type(field2)); Type *type2 = field2.type; ByteSize abi_type_size = type_size(type2); // Not on even offset, use packed semantics. @@ -132,10 +132,10 @@ static bool riscv_detect_fpcc_struct(Type *type, AbiType *field1_ref, unsigned * static ABIArgInfo *riscv_classify_argument_type(Type *type, bool is_fixed, unsigned *gprs, unsigned *fprs) { - assert(type == type->canonical); + ASSERT0(type == type->canonical); unsigned xlen = compiler.platform.riscv.xlen; - assert(is_power_of_two(xlen)); + ASSERT0(is_power_of_two(xlen)); ByteSize size = type_size(type); diff --git a/src/compiler/abi/c_abi_x64.c b/src/compiler/abi/c_abi_x64.c index 7a0341722..e2c2b8af7 100644 --- a/src/compiler/abi/c_abi_x64.c +++ b/src/compiler/abi/c_abi_x64.c @@ -122,7 +122,7 @@ ABIArgInfo *x64_indirect_result(Type *type, unsigned free_int_regs) */ ABIArgInfo *x64_classify_reg_call_struct_type_check(Type *type, Registers *needed_registers) { - assert(x64_type_is_structure(type)); + ASSERT0(x64_type_is_structure(type)); // These are all passed in two registers. if (type->type_kind == TYPE_SLICE || type->type_kind == TYPE_ANY) @@ -132,7 +132,7 @@ ABIArgInfo *x64_classify_reg_call_struct_type_check(Type *type, Registers *neede } // Struct, err type handled => - assert(type->type_kind == TYPE_STRUCT); + ASSERT0(type->type_kind == TYPE_STRUCT); // Variable array structs are always passed by pointer. if (type->decl->has_variable_array) return x64_indirect_return_result(type); @@ -175,7 +175,7 @@ static X64Class x64_merge(X64Class accum, X64Class field) // 6. 
SSE // Accum should never be memory (we should have returned) or - assert(accum != CLASS_MEMORY); + ASSERT0(accum != CLASS_MEMORY); if (accum == field) return accum; // Swap @@ -305,7 +305,7 @@ void x64_classify_array(Type *type, ByteSize offset_base, X64Class *current, X64 if (*lo_class == CLASS_MEMORY || *hi_class == CLASS_MEMORY) break; } x64_classify_post_merge(size, lo_class, hi_class); - assert(*hi_class != CLASS_SSEUP || *lo_class == CLASS_SSE); + ASSERT0(*hi_class != CLASS_SSEUP || *lo_class == CLASS_SSE); } void x64_classify_vector(Type *type, ByteSize offset_base, X64Class *current, X64Class *lo_class, X64Class *hi_class, @@ -365,7 +365,7 @@ static Decl *x64_get_member_at_offset(Decl *decl, unsigned offset) if (member->offset > (ArrayIndex)offset) break; last_match = member; } - assert(last_match); + ASSERT0(last_match); return last_match; } @@ -616,7 +616,7 @@ AbiType x64_get_int_type_at_offset(Type *type, unsigned offset, Type *source_typ break; } ByteSize size = type_size(source_type); - assert(size != source_offset); + ASSERT0(size != source_offset); if (size - source_offset > 8) return abi_type_get(type_ulong); return abi_type_get_int_bits((size - source_offset) * 8); } @@ -649,7 +649,7 @@ static AbiType x64_get_byte_vector_type(Type *type) unsigned size = type_size(type); - assert(size == 16 || size == 32 || size == 64); + ASSERT0(size == 16 || size == 32 || size == 64); // Return a vector type based on the size. return abi_type_get(type_get_vector(type_double, size / 8)); @@ -659,7 +659,7 @@ static ABIArgInfo *x64_get_argument_pair_return(AbiType low_type, AbiType high_t { TypeSize low_size = abi_type_size(low_type); unsigned hi_start = aligned_offset(low_size, abi_type_abi_alignment(high_type)); - assert(hi_start == 8 && "Expected aligned with C-style structs."); + ASSERT0(hi_start == 8 && "Expected aligned with C-style structs."); return abi_arg_new_direct_pair(low_type, high_type); } @@ -673,8 +673,8 @@ ABIArgInfo *x64_classify_return(Type *return_type) x64_classify(return_type, 0, &lo_class, &hi_class, NAMED); // Invariants - assert(hi_class != CLASS_MEMORY || lo_class == CLASS_MEMORY); - assert(hi_class != CLASS_SSEUP || lo_class == CLASS_SSE); + ASSERT0(hi_class != CLASS_MEMORY || lo_class == CLASS_MEMORY); + ASSERT0(hi_class != CLASS_SSEUP || lo_class == CLASS_SSE); AbiType result_type = ABI_TYPE_EMPTY; switch (lo_class) @@ -685,7 +685,7 @@ ABIArgInfo *x64_classify_return(Type *return_type) return abi_arg_ignore(); } // If low part is padding, keep type null - assert(hi_class == CLASS_SSE || hi_class == CLASS_INTEGER); + ASSERT0(hi_class == CLASS_SSE || hi_class == CLASS_INTEGER); break; case CLASS_SSEUP: UNREACHABLE @@ -717,11 +717,11 @@ ABIArgInfo *x64_classify_return(Type *return_type) // Previously handled. break; case CLASS_INTEGER: - assert(lo_class != CLASS_NO_CLASS); + ASSERT0(lo_class != CLASS_NO_CLASS); high_part = x64_get_int_type_at_offset(return_type, 8, return_type, 8); break; case CLASS_SSE: - assert(lo_class != CLASS_NO_CLASS); + ASSERT0(lo_class != CLASS_NO_CLASS); high_part = abi_type_get(x64_get_sse_type_at_offset(return_type, 8, return_type, 8)); break; case CLASS_SSEUP: @@ -730,7 +730,7 @@ ABIArgInfo *x64_classify_return(Type *return_type) // vector register. // // SSEUP should always be preceded by SSE, just widen. 
- assert(lo_class == CLASS_SSE && "Unexpected SSEUp classification."); + ASSERT0(lo_class == CLASS_SSE && "Unexpected SSEUp classification."); result_type = x64_get_byte_vector_type(return_type); break; } @@ -748,7 +748,7 @@ ABIArgInfo *x64_classify_return(Type *return_type) } return abi_arg_new_direct_coerce_type(result_type.type->canonical); } - assert(result_type.int_bits_plus_1 - 1 == type_size(return_type) * 8); + ASSERT0(result_type.int_bits_plus_1 - 1 == type_size(return_type) * 8); return abi_arg_new_direct_coerce_int(); } @@ -764,14 +764,14 @@ ABIArgInfo *x64_classify_return(Type *return_type) */ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs, Registers *needed_registers, NamedArgument is_named) { - assert(type == type_lowering(type)); + ASSERT0(type == type_lowering(type)); X64Class hi_class; X64Class lo_class; x64_classify(type, 0, &lo_class, &hi_class, is_named); // Invariants - assert(hi_class != CLASS_MEMORY || lo_class == CLASS_MEMORY); - assert(hi_class != CLASS_SSEUP || lo_class == CLASS_SSE); + ASSERT0(hi_class != CLASS_MEMORY || lo_class == CLASS_MEMORY); + ASSERT0(hi_class != CLASS_SSEUP || lo_class == CLASS_SSE); AbiType result_type; *needed_registers = (Registers) { 0, 0 }; @@ -781,7 +781,7 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs { case CLASS_NO_CLASS: // Only C++ would leave 8 bytes of padding, so we can ignore that case. - assert(hi_class == CLASS_NO_CLASS); + ASSERT0(hi_class == CLASS_NO_CLASS); return abi_arg_ignore(); case CLASS_SSEUP: UNREACHABLE @@ -792,7 +792,7 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs result_type = x64_get_int_type_at_offset(type, 0, type, 0); if (hi_class == CLASS_NO_CLASS && type_is_promotable_int_bool(type)) { - assert(abi_type_is_type(result_type)); + ASSERT0(abi_type_is_type(result_type)); return abi_arg_new_direct_coerce_int_ext(result_type.type); } break; @@ -814,15 +814,15 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs needed_registers->int_registers++; high_part = x64_get_int_type_at_offset(type, 8, type, 8); // Return directly into high part. 
- assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed, this is C++ stuff."); + ASSERT0(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed, this is C++ stuff."); break; case CLASS_SSE: needed_registers->sse_registers++; high_part = abi_type_get(x64_get_sse_type_at_offset(type, 8, type, 8)); - assert(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed, this is C++ stuff"); + ASSERT0(lo_class != CLASS_NO_CLASS && "empty first 8 bytes not allowed, this is C++ stuff"); break; case CLASS_SSEUP: - assert(lo_class == CLASS_SSE && "Unexpected SSEUp classification."); + ASSERT0(lo_class == CLASS_SSE && "Unexpected SSEUp classification."); result_type = x64_get_byte_vector_type(type); break; } @@ -843,7 +843,7 @@ static ABIArgInfo *x64_classify_argument_type(Type *type, unsigned free_int_regs } return abi_arg_new_direct_coerce_type(result); } - assert(result_type.int_bits_plus_1 - 1 == type_size(type) * 8); + ASSERT0(result_type.int_bits_plus_1 - 1 == type_size(type) * 8); return abi_arg_new_direct_coerce_int(); } diff --git a/src/compiler/abi/c_abi_x86.c b/src/compiler/abi/c_abi_x86.c index 68aabb3a5..47c0bac80 100644 --- a/src/compiler/abi/c_abi_x86.c +++ b/src/compiler/abi/c_abi_x86.c @@ -92,7 +92,7 @@ static ABIArgInfo *create_indirect_return_x86(Type *type, Regs *regs) static bool x86_should_return_type_in_reg(Type *type) { - assert(type->canonical == type); + ASSERT0(type->canonical == type); ByteSize size = type_size(type); if (size > 8) return false; @@ -226,7 +226,7 @@ static inline bool x86_is_mmxtype(Type *type) static inline bool x86_can_expand_indirect_aggregate_arg(Type *type) { - assert(type_is_abi_aggregate(type)); + ASSERT0(type_is_abi_aggregate(type)); // Test whether an argument type which is to be passed indirectly (on the // stack) would have the equivalent layout if it was expanded into separate @@ -373,7 +373,7 @@ static inline ABIArgInfo *x86_classify_vector(Regs *regs, Type *type) static inline ABIArgInfo *x86_classify_aggregate(CallABI call, Regs *regs, Type *type) { // Only called for aggregates. - assert(type_is_abi_aggregate(type)); + ASSERT0(type_is_abi_aggregate(type)); if (type_is_union_or_strukt(type) && type->decl->has_variable_array) { @@ -390,7 +390,7 @@ static inline ABIArgInfo *x86_classify_aggregate(CallABI call, Regs *regs, Type // Here we coerce the aggregate into a struct { i32, i32, ... } // but we do not generate this struct immediately here. 
unsigned size_in_regs = (size + 3) / 4; - assert(size_in_regs < 8); + ASSERT0(size_in_regs < 8); ABIArgInfo *info; if (size_in_regs > 1) { diff --git a/src/compiler/asm_target.c b/src/compiler/asm_target.c index 397312d26..57de01912 100644 --- a/src/compiler/asm_target.c +++ b/src/compiler/asm_target.c @@ -175,7 +175,7 @@ static inline void reg_instr_clob(PlatformTarget *target, const char *name, Clob unsigned param_count = 0; while (args && args[0] != 0) { - assert(param_count <= MAX_ASM_INSTRUCTION_PARAMS); + ASSERT0(param_count <= MAX_ASM_INSTRUCTION_PARAMS); instr->param[param_count++] = decode_arg_type(&args); } instr->param_count = param_count; @@ -188,7 +188,7 @@ static inline void reg_instr(PlatformTarget *target, const char *name, const cha int param_count = 0; while (args && args[0] != 0) { - assert(param_count <= MAX_ASM_INSTRUCTION_PARAMS); + ASSERT0(param_count <= MAX_ASM_INSTRUCTION_PARAMS); instr->param[param_count++] = decode_arg_type(&args); } instr->param_count = param_count; diff --git a/src/compiler/ast.c b/src/compiler/ast.c index fac8cbe80..99cba5ce4 100644 --- a/src/compiler/ast.c +++ b/src/compiler/ast.c @@ -174,7 +174,7 @@ Decl *decl_new_generated_var(Type *type, VarDeclKind kind, SourceSpan span) decl->var.kind = kind; decl->type = type; decl->alignment = type ? type_alloca_alignment(type) : 0; - assert(!type || !type_is_user_defined(type) || type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(!type || !type_is_user_defined(type) || type->decl->resolve_status == RESOLVE_DONE); decl->var.type_info = type_info_id_new_base(type, span); decl->resolve_status = RESOLVE_DONE; return decl; @@ -428,7 +428,7 @@ AlignSize decl_find_member_offset(Decl *decl, Decl *member) default: return NO_MATCH; } - assert(members); + ASSERT0(members); unsigned list = vec_size(members); for (unsigned i = 0; i < list; i++) { diff --git a/src/compiler/bigint.c b/src/compiler/bigint.c index 0188caff2..6952ef60b 100644 --- a/src/compiler/bigint.c +++ b/src/compiler/bigint.c @@ -41,7 +41,7 @@ UNUSED static char digit_to_char(uint8_t digit, bool upper) char *i128_to_string(Int128 op, uint64_t base, bool is_signed, bool use_prefix) { - assert(base >= 2 && base <= 16); + ASSERT0(base >= 2 && base <= 16); static char digits[16] = "0123456789ABCDEF"; char buffer[130]; char *loc = buffer; @@ -322,7 +322,7 @@ Int128 i128_from_float_unsigned(Real d) UNUSED bool i128_get_bit(const Int128 *op, int bit) { - assert(bit < 128 && bit >= 0); + ASSERT0(bit < 128 && bit >= 0); if (bit > 63) { return (op->high >> (bit - 64)) & 1; @@ -761,7 +761,7 @@ unsigned int_bits_needed(Int op) Int int_add(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Int){ i128_extend(i128_add(op1.i, op2.i), op1.type), op1.type }; } @@ -772,7 +772,7 @@ Int int_add64(Int op1, uint64_t op2) Int int_sub(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Int){ i128_extend(i128_sub(op1.i, op2.i), op1.type), op1.type }; } @@ -783,7 +783,7 @@ Int int_sub64(Int op1, uint64_t op2) Int int_mul(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Int){ i128_extend(i128_mult(op1.i, op2.i), op1.type), op1.type }; } @@ -821,7 +821,7 @@ Int int_conv(Int op, TypeKind to_type) Int int_div(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); Int128 res; if (type_kind_is_signed(op1.type)) { @@ -836,7 +836,7 @@ Int int_div(Int op1, Int op2) Int int_rem(Int op1, Int op2) { - assert(op1.type == 
op2.type); + ASSERT0(op1.type == op2.type); Int128 res; if (type_kind_is_signed(op1.type)) { @@ -851,19 +851,19 @@ Int int_rem(Int op1, Int op2) Int int_and(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Int){ i128_and(op1.i, op2.i), op1.type }; } Int int_or(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Int){ i128_or(op1.i, op2.i), op1.type }; } Int int_xor(Int op1, Int op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Int){ i128_xor(op1.i, op2.i), op1.type }; } diff --git a/src/compiler/codegen_asm.c b/src/compiler/codegen_asm.c index 8b5654b71..20b2149ff 100644 --- a/src/compiler/codegen_asm.c +++ b/src/compiler/codegen_asm.c @@ -242,7 +242,7 @@ static inline char *codegen_create_riscv_asm(AsmInlineBlock *block) const char *codegen_create_asm(Ast *ast) { - assert(ast->ast_kind == AST_ASM_BLOCK_STMT); + ASSERT0(ast->ast_kind == AST_ASM_BLOCK_STMT); scratch_buffer_clear(); AsmInlineBlock *block = ast->asm_block_stmt.block; if (compiler.platform.arch == ARCH_TYPE_X86_64 || compiler.platform.arch == ARCH_TYPE_X86) diff --git a/src/compiler/codegen_general.c b/src/compiler/codegen_general.c index 2b443b4c6..e70f58c37 100644 --- a/src/compiler/codegen_general.c +++ b/src/compiler/codegen_general.c @@ -168,8 +168,8 @@ bool type_homogenous_aggregate_small_enough(Type *type, unsigned members) */ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements) { - assert(base && type && elements); - assert(type_lowering(type) == type); + ASSERT0(base && type && elements); + ASSERT0(type_lowering(type) == type); *elements = 0; switch (type->type_kind) { @@ -196,7 +196,7 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements) // Go down deep into a nester array. while (member_type->type_kind == TYPE_ARRAY) { - assert(member_type->array.len && "Zero length arrays not allowed"); + ASSERT0(member_type->array.len && "Zero length arrays not allowed"); member_mult *= member_type->array.len; member_type = member_type->array.base; } @@ -215,7 +215,7 @@ bool type_is_homogenous_aggregate(Type *type, Type **base, unsigned *elements) *elements += member_members; } } - assert(base); + ASSERT0(base); if (!*base) return false; // Ensure no padding diff --git a/src/compiler/codegen_internal.h b/src/compiler/codegen_internal.h index 57bbebe2b..8ad5537cb 100644 --- a/src/compiler/codegen_internal.h +++ b/src/compiler/codegen_internal.h @@ -115,7 +115,7 @@ UNUSED static inline bool abi_type_is_promotable_integer_or_bool(AbiType type) return type.type->builtin.bitsize < compiler.platform.width_c_int; } // We should only get npot or > big ints here. 
- assert(!is_power_of_two(type.int_bits_plus_1 - 1) || type.int_bits_plus_1 < compiler.platform.width_c_int); + ASSERT0(!is_power_of_two(type.int_bits_plus_1 - 1) || type.int_bits_plus_1 < compiler.platform.width_c_int); return false; } diff --git a/src/compiler/compiler.c b/src/compiler/compiler.c index 4369af770..46fe17407 100644 --- a/src/compiler/compiler.c +++ b/src/compiler/compiler.c @@ -174,7 +174,7 @@ const char *build_base_name(void) static const char *exe_name(void) { - assert(compiler.context.main || compiler.build.no_entry); + ASSERT0(compiler.context.main || compiler.build.no_entry); const char *name; if (compiler.build.name || compiler.build.no_entry) { @@ -495,7 +495,7 @@ void compiler_compile(void) output_exe = exe_name(); break; case TARGET_TYPE_EXECUTABLE: - assert(compiler.context.main || compiler.build.no_entry); + ASSERT0(compiler.context.main || compiler.build.no_entry); output_exe = exe_name(); break; case TARGET_TYPE_STATIC_LIB: @@ -549,7 +549,7 @@ void compiler_compile(void) { int compiled = compile_cfiles(compiler.build.cc, compiler.build.csources, compiler.build.cflags, compiler.build.cinclude_dirs, &obj_files[output_file_count], "tmp_c_compile"); - assert(cfiles == compiled); + ASSERT0(cfiles == compiled); (void)compiled; } const char **obj_file_next = &obj_files[output_file_count + cfiles]; @@ -596,7 +596,7 @@ void compiler_compile(void) { puts(obj_files[i]); } - assert(obj_files[i] || !output_exe); + ASSERT0(obj_files[i] || !output_exe); } if (compiler.build.print_output) { @@ -876,7 +876,7 @@ static inline void setup_define(const char *id, Expr *expr) static void setup_int_define(const char *id, uint64_t i, Type *type) { Type *flat = type_flatten(type); - assert(type_is_integer(flat)); + ASSERT0(type_is_integer(flat)); Expr *expr = expr_new_const_int(INVALID_SPAN, flat, i); expr->type = type; if (expr_const_will_overflow(&expr->const_expr, flat->type_kind)) @@ -1444,7 +1444,7 @@ void global_context_clear_errors(void) void global_context_add_type(Type *type) { DEBUG_LOG("Created type %s.", type->name); - assert(type_ok(type)); + ASSERT0(type_ok(type)); vec_add(compiler.context.type, type); } @@ -1461,7 +1461,7 @@ const char *get_object_extension(void) Module *global_context_find_module(const char *name) { - assert(name); + ASSERT0(name); return htable_get(&compiler.context.modules, (void *)name); } diff --git a/src/compiler/compiler_internal.h b/src/compiler/compiler_internal.h index a69c9bd00..0312ac665 100644 --- a/src/compiler/compiler_internal.h +++ b/src/compiler/compiler_internal.h @@ -2290,7 +2290,7 @@ bool sema_analyse_statement(SemaContext *context, Ast *statement); bool sema_expr_analyse_assign_right_side(SemaContext *context, Expr *expr, Type *left_type, Expr *right, bool is_unwrapped_var, bool is_declaration); bool sema_expr_analyse_initializer_list(SemaContext *context, Type *to, Expr *expr); -Expr **sema_expand_vasplat_exprs(SemaContext *c, Expr **exprs); +Expr **sema_expand_vasplat_exprs(SemaContext *context, Expr **exprs); bool sema_expr_analyse_general_call(SemaContext *context, Expr *expr, Decl *decl, Expr *struct_var, bool optional, bool *no_match_ref); @@ -2532,16 +2532,16 @@ INLINE Type *type_from_inferred(Type *flattened, Type *element_type, unsigned co switch (flattened->type_kind) { case TYPE_POINTER: - assert(count == 0); + ASSERT0(count == 0); return type_get_ptr(element_type); case TYPE_VECTOR: - assert(flattened->array.len == count); + ASSERT0(flattened->array.len == count); FALLTHROUGH; case TYPE_INFERRED_VECTOR: return 
type_get_vector(element_type, count); break; case TYPE_ARRAY: - assert(flattened->array.len == count); + ASSERT0(flattened->array.len == count); FALLTHROUGH; case TYPE_INFERRED_ARRAY: return type_get_array(element_type, count); @@ -2843,7 +2843,7 @@ INLINE Type *type_new(TypeKind kind, const char *name) { Type *type = CALLOCS(Type); type->type_kind = kind; - assert(name); + ASSERT0(name); type->name = name; global_context_add_type(type); return type; @@ -2852,8 +2852,8 @@ INLINE Type *type_new(TypeKind kind, const char *name) INLINE bool type_convert_will_trunc(Type *destination, Type *source) { - assert(type_flat_is_vector(destination) || type_is_builtin(destination->canonical->type_kind)); - assert(type_flat_is_vector(destination) || type_is_builtin(source->canonical->type_kind)); + ASSERT0(type_flat_is_vector(destination) || type_is_builtin(destination->canonical->type_kind)); + ASSERT0(type_flat_is_vector(destination) || type_is_builtin(source->canonical->type_kind)); return type_size(destination) < type_size(source); } @@ -2862,7 +2862,7 @@ INLINE bool type_convert_will_trunc(Type *destination, Type *source) // Useful sanity check function. INLINE void advance_and_verify(ParseContext *context, TokenType token_type) { - assert(context->tok == token_type); + ASSERT0(context->tok == token_type); advance(context); } @@ -3096,7 +3096,7 @@ static inline Decl *decl_raw(Decl *decl) } if (decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_UNWRAPPED) return decl; decl = decl->var.alias; - assert(decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_UNWRAPPED); + ASSERT0(decl->decl_kind != DECL_VAR || decl->var.kind != VARDECL_UNWRAPPED); return decl; } @@ -3371,7 +3371,7 @@ INLINE void expr_resolve_ident(Expr *expr, Decl *decl) INLINE Type *exprtype(ExprId expr_id) { - assert(expr_id); + ASSERT0(expr_id); return exprptr(expr_id)->type; } @@ -3507,7 +3507,7 @@ INLINE void expr_rewrite_const_null(Expr *expr, Type *type) INLINE void expr_rewrite_const_empty_slice(Expr *expr, Type *type) { - assert(type_flatten(type)->type_kind == TYPE_SLICE); + ASSERT0(type_flatten(type)->type_kind == TYPE_SLICE); expr->const_expr = (ExprConst) { .const_kind = CONST_SLICE, .initializer = NULL }; expr->expr_kind = EXPR_CONST; expr->type = type; @@ -3524,8 +3524,8 @@ INLINE void expr_rewrite_const_untyped_list(Expr *expr, Expr **elements) INLINE void expr_rewrite_const_initializer(Expr *expr, Type *type, ConstInitializer *initializer) { - assert(type_flatten(type)->type_kind != TYPE_SLICE); - assert(type != type_untypedlist); + ASSERT0(type_flatten(type)->type_kind != TYPE_SLICE); + ASSERT0(type != type_untypedlist); expr->expr_kind = EXPR_CONST; expr->type = type; expr->const_expr = (ExprConst) { .initializer = initializer, .const_kind = CONST_INITIALIZER }; @@ -3534,8 +3534,8 @@ INLINE void expr_rewrite_const_initializer(Expr *expr, Type *type, ConstInitiali INLINE void expr_rewrite_const_slice(Expr *expr, Type *type, ConstInitializer *initializer) { - assert(type_flatten(type)->type_kind == TYPE_SLICE); - assert(type != type_untypedlist); + ASSERT0(type_flatten(type)->type_kind == TYPE_SLICE); + ASSERT0(type != type_untypedlist); expr->expr_kind = EXPR_CONST; expr->type = type; expr->const_expr = (ExprConst) { .slice_init = initializer, .const_kind = CONST_SLICE }; @@ -3625,7 +3625,7 @@ INLINE AsmRegister *asm_reg_by_index(unsigned index) INLINE void clobbers_add(Clobbers *clobbers, unsigned index) { - assert(index < MAX_CLOBBER_FLAGS); + ASSERT0(index < MAX_CLOBBER_FLAGS); unsigned bit = index % 64; 
unsigned element = index / 64; clobbers->mask[element] |= (1ull << bit); @@ -3638,7 +3638,7 @@ static inline Clobbers clobbers_make_from(Clobbers clobbers, ...) int i; while ((i = va_arg(list, int)) > -1) { - assert(i < MAX_CLOBBER_FLAGS); + ASSERT0(i < MAX_CLOBBER_FLAGS); unsigned bit = i % 64; unsigned element = i / 64; clobbers.mask[element] |= (1ull << bit); @@ -3650,7 +3650,7 @@ static inline Clobbers clobbers_make_from(Clobbers clobbers, ...) static inline Clobbers clobbers_make(unsigned index, ...) { Clobbers clobbers = { .mask[0] = 0 }; - assert(index < MAX_CLOBBER_FLAGS); + ASSERT0(index < MAX_CLOBBER_FLAGS); unsigned bit = index % 64; unsigned element = index / 64; clobbers.mask[element] |= (1ull << bit); @@ -3659,7 +3659,7 @@ static inline Clobbers clobbers_make(unsigned index, ...) int i; while ((i = va_arg(list, int)) > -1) { - assert(i < MAX_CLOBBER_FLAGS); + ASSERT0(i < MAX_CLOBBER_FLAGS); bit = i % 64; element = i / 64; clobbers.mask[element] |= (1ull << bit); diff --git a/src/compiler/context.c b/src/compiler/context.c index b427d55f9..93d7a65a9 100644 --- a/src/compiler/context.c +++ b/src/compiler/context.c @@ -174,7 +174,7 @@ void decl_register(Decl *decl) void unit_register_global_decl(CompilationUnit *unit, Decl *decl) { - assert(!decl->unit || decl->unit->module->is_generic); + ASSERT0(!decl->unit || decl->unit->module->is_generic); decl->unit = unit; switch (decl->decl_kind) @@ -184,7 +184,7 @@ void unit_register_global_decl(CompilationUnit *unit, Decl *decl) case DECL_POISONED: break; case DECL_MACRO: - assert(decl->name); + ASSERT0(decl->name); if (decl->func_decl.type_parent) { vec_add(unit->methods_to_register, decl); @@ -197,7 +197,7 @@ void unit_register_global_decl(CompilationUnit *unit, Decl *decl) decl_register(decl); break; case DECL_FUNC: - assert(decl->name); + ASSERT0(decl->name); if (decl->func_decl.type_parent) { vec_add(unit->methods_to_register, decl); @@ -210,7 +210,7 @@ void unit_register_global_decl(CompilationUnit *unit, Decl *decl) decl_register(decl); break; case DECL_VAR: - assert(decl->name); + ASSERT0(decl->name); vec_add(unit->vars, decl); decl_register(decl); break; @@ -221,17 +221,17 @@ void unit_register_global_decl(CompilationUnit *unit, Decl *decl) case DECL_TYPEDEF: case DECL_FAULT: case DECL_BITSTRUCT: - assert(decl->name); + ASSERT0(decl->name); vec_add(unit->types, decl); decl_register(decl); break; case DECL_DEFINE: - assert(decl->name); + ASSERT0(decl->name); vec_add(unit->generic_defines, decl); decl_register(decl); break; case DECL_ENUM: - assert(decl->name); + ASSERT0(decl->name); vec_add(unit->enums, decl); decl_register(decl); break; @@ -269,7 +269,7 @@ void unit_register_global_decl(CompilationUnit *unit, Decl *decl) } return; ERR: - assert(decl != old); + ASSERT0(decl != old); sema_shadow_error(NULL, decl, old); decl_poison(decl); decl_poison(old); diff --git a/src/compiler/copying.c b/src/compiler/copying.c index ac2a4c787..e89389119 100644 --- a/src/compiler/copying.c +++ b/src/compiler/copying.c @@ -168,7 +168,7 @@ TypeInfo *copy_type_info_single(TypeInfo *type_info) Ast *copy_ast_macro(Ast *source_ast) { - assert(copy_struct.copy_in_use); + ASSERT0(copy_struct.copy_in_use); return ast_copy_deep(©_struct, source_ast); } @@ -407,7 +407,7 @@ Expr *copy_expr(CopyStruct *c, Expr *source_expr) return expr; case EXPR_CT_IDENT: case EXPR_HASH_IDENT: - assert(expr->resolve_status != RESOLVE_DONE); + ASSERT0(expr->resolve_status != RESOLVE_DONE); return expr; case EXPR_BENCHMARK_HOOK: case EXPR_TEST_HOOK: @@ -762,20 +762,20 @@ 
Ast **copy_ast_list(CopyStruct *c, Ast **to_copy) void copy_begin(void) { copy_struct.current_fixup = copy_struct.fixups; - assert(!copy_struct.copy_in_use); + ASSERT0(!copy_struct.copy_in_use); copy_struct.copy_in_use = true; copy_struct.single_static = false; } void copy_end(void) { - assert(copy_struct.copy_in_use); + ASSERT0(copy_struct.copy_in_use); copy_struct.copy_in_use = false; } Decl **copy_decl_list_macro(Decl **decl_list) { - assert(copy_struct.copy_in_use); + ASSERT0(copy_struct.copy_in_use); return copy_decl_list(©_struct, decl_list); } @@ -879,19 +879,19 @@ TypeInfo *copy_type_info(CopyStruct *c, TypeInfo *source) case TYPE_INFO_EVALTYPE: case TYPE_INFO_TYPEOF: case TYPE_INFO_VATYPE: - assert(source->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(source->resolve_status == RESOLVE_NOT_DONE); copy->unresolved_type_expr = copy_expr(c, source->unresolved_type_expr); return copy; case TYPE_INFO_VECTOR: case TYPE_INFO_ARRAY: - assert(source->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(source->resolve_status == RESOLVE_NOT_DONE); copy->array.len = copy_expr(c, source->array.len); copy->array.base = copy_type_info(c, source->array.base); return copy; case TYPE_INFO_INFERRED_ARRAY: case TYPE_INFO_SLICE: case TYPE_INFO_INFERRED_VECTOR: - assert(source->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(source->resolve_status == RESOLVE_NOT_DONE); copy->array.base = copy_type_info(c, source->array.base); return copy; case TYPE_INFO_POINTER: diff --git a/src/compiler/decltable.c b/src/compiler/decltable.c index a651c1933..30d755f83 100644 --- a/src/compiler/decltable.c +++ b/src/compiler/decltable.c @@ -41,11 +41,11 @@ static inline void decltable_resize(DeclTable *table) void decltable_set(DeclTable *table, Decl *decl) { - assert(decl && "Cannot insert NULL"); + ASSERT0(decl && "Cannot insert NULL"); DeclId *entry = declentry_find(table->entries, table->capacity, decl->name); DeclId decl_id = declid(decl); DeclId old_id = *entry; - assert(old_id != decl_id); + ASSERT0(old_id != decl_id); // Simple case, a new decl if (!old_id) { @@ -81,7 +81,7 @@ DeclId decltable_get(DeclTable *table, const char *name) void decltable_init(DeclTable *table, uint32_t initial_size) { - assert(initial_size && "Size must be larger than 0"); + ASSERT0(initial_size && "Size must be larger than 0"); assert (is_power_of_two(initial_size) && "Must be a power of two"); DeclId *entries = CALLOC(initial_size * sizeof(DeclId)); diff --git a/src/compiler/diagnostics.c b/src/compiler/diagnostics.c index 9e14de1cc..a71aa8077 100644 --- a/src/compiler/diagnostics.c +++ b/src/compiler/diagnostics.c @@ -237,7 +237,7 @@ void span_to_scratch(SourceSpan span) break; } } - assert(row == row_to_find); + ASSERT0(row == row_to_find); const char *start = current + col - 1; bool last_was_whitespace = false; for (uint32_t i = 0; i < length; i++) @@ -277,7 +277,7 @@ const char *span_to_string(SourceSpan span) break; } } - assert(row == row_to_find); + ASSERT0(row == row_to_find); const char *start = current + col - 1; return str_copy(start, length); } diff --git a/src/compiler/expr.c b/src/compiler/expr.c index e11b92307..89f95b001 100644 --- a/src/compiler/expr.c +++ b/src/compiler/expr.c @@ -22,7 +22,7 @@ Expr *expr_negate_expr(Expr *expr) bool expr_in_int_range(Expr *expr, int64_t low, int64_t high) { - assert(expr_is_const(expr) && expr->const_expr.const_kind == CONST_INTEGER); + ASSERT0(expr_is_const(expr) && expr->const_expr.const_kind == CONST_INTEGER); Int val = expr->const_expr.ixx; if (!int_fits(val, TYPE_I64)) return 
false; int64_t value = int_to_i64(val); @@ -137,7 +137,7 @@ bool expr_may_addr(Expr *expr) bool expr_is_runtime_const(Expr *expr) { - assert(expr->resolve_status == RESOLVE_DONE); + ASSERT0(expr->resolve_status == RESOLVE_DONE); RETRY: switch (expr->expr_kind) { @@ -274,7 +274,7 @@ bool expr_is_runtime_const(Expr *expr) } goto RETRY; case EXPR_TERNARY: - assert(!exprid_is_runtime_const(expr->ternary_expr.cond)); + ASSERT0(!exprid_is_runtime_const(expr->ternary_expr.cond)); return false; case EXPR_FORCE_UNWRAP: case EXPR_LAST_FAULT: @@ -442,7 +442,7 @@ static inline bool expr_unary_addr_is_constant_eval(Expr *expr) void expr_insert_addr(Expr *original) { - assert(original->resolve_status == RESOLVE_DONE); + ASSERT0(original->resolve_status == RESOLVE_DONE); if (original->expr_kind == EXPR_UNARY && original->unary_expr.operator == UNARYOP_DEREF) { *original = *original->unary_expr.expr; @@ -459,8 +459,8 @@ void expr_insert_addr(Expr *original) Expr *expr_generate_decl(Decl *decl, Expr *assign) { - assert(decl->decl_kind == DECL_VAR); - assert(decl->var.init_expr == NULL); + ASSERT0(decl->decl_kind == DECL_VAR); + ASSERT0(decl->var.init_expr == NULL); Expr *expr_decl = expr_new(EXPR_DECL, decl->span); expr_decl->decl_expr = decl; if (!assign) decl->var.no_init = true; @@ -497,7 +497,7 @@ static inline ConstInitializer *initializer_for_index(ConstInitializer *initiali } FOREACH(ConstInitializer *, init, initializer->init_array.elements) { - assert(init->kind == CONST_INIT_ARRAY_VALUE); + ASSERT0(init->kind == CONST_INIT_ARRAY_VALUE); if (init->init_array_value.index == index) return init->init_array_value.element; } return NULL; @@ -539,7 +539,7 @@ void expr_rewrite_to_const_zero(Expr *expr, Type *type) return; case TYPE_ENUM: expr->const_expr.const_kind = CONST_ENUM; - assert(canonical->decl->resolve_status == RESOLVE_DONE); + ASSERT0(canonical->decl->resolve_status == RESOLVE_DONE); expr->const_expr.enum_err_val = canonical->decl->enums.values[0]; expr->resolve_status = RESOLVE_DONE; break; @@ -865,7 +865,7 @@ Expr *expr_new_const_bool(SourceSpan span, Type *type, bool value) expr->expr_kind = EXPR_CONST; expr->span = span; expr->type = type; - assert(type_flatten(type)->type_kind == TYPE_BOOL); + ASSERT0(type_flatten(type)->type_kind == TYPE_BOOL); expr->const_expr.b = value; expr->const_expr.const_kind = CONST_BOOL; expr->resolve_status = RESOLVE_DONE; @@ -929,7 +929,7 @@ void expr_rewrite_insert_deref(Expr *original) if (original->resolve_status == RESOLVE_DONE) { Type *no_fail = type_no_optional(inner->type); - assert(no_fail->canonical->type_kind == TYPE_POINTER); + ASSERT0(no_fail->canonical->type_kind == TYPE_POINTER); // Only fold to the canonical type if it wasn't a pointer. Type *pointee = no_fail->type_kind == TYPE_POINTER ? 
no_fail->pointer : no_fail->canonical->pointer; diff --git a/src/compiler/float.c b/src/compiler/float.c index cb035d0dc..c6be5a730 100644 --- a/src/compiler/float.c +++ b/src/compiler/float.c @@ -63,31 +63,31 @@ UNUSED static int min_exponent(TypeKind kind) Float float_add(Float op1, Float op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Float){ op1.f + op2.f, op1.type }; } Float float_sub(Float op1, Float op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Float){ op1.f - op2.f, op1.type }; } Float float_mul(Float op1, Float op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Float){ op1.f * op2.f, op1.type }; } Float float_div(Float op1, Float op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Float){ op1.f / op2.f, op1.type }; } Float float_rem(Float op1, Float op2) { - assert(op1.type == op2.type); + ASSERT0(op1.type == op2.type); return (Float){fmod(op1.f, op2.f), op1.type }; } diff --git a/src/compiler/headers.c b/src/compiler/headers.c index ff4ce5450..39e624e38 100644 --- a/src/compiler/headers.c +++ b/src/compiler/headers.c @@ -46,7 +46,7 @@ INLINE bool header_try_gen_both(HeaderContext *c, Type *type) if (header_try_gen_definition(c, type)) { bool success = header_try_gen_decl(c, type); - assert(success); + ASSERT0(success); return true; } return false; @@ -91,7 +91,7 @@ static void header_print_type(HeaderContext *c, Type *type) PRINTF("%s", decl_get_extname(type->decl)); return; } - assert(!type_is_optional(type)); + ASSERT0(!type_is_optional(type)); switch (type->type_kind) { case CT_TYPES: @@ -581,7 +581,7 @@ static void header_gen_maybe_generate_type(HeaderContext *c, Type *type, bool is static void header_gen_global_var(HeaderContext *c, Decl *decl, bool fn_globals, bool *globals_found) { - assert(decl->decl_kind == DECL_VAR); + ASSERT0(decl->decl_kind == DECL_VAR); // Only exports. if (!decl->is_export) return; Type *type = decl->type->canonical; @@ -613,7 +613,7 @@ static void header_gen_global_var(HeaderContext *c, Decl *decl, bool fn_globals, Type *flat = type_flatten(type); if (type_is_arraylike(flat) || type_is_user_defined(flat) || !init) return; PRINTF("#define %s ", decl_get_extname(decl)); - assert(expr_is_const(init)); + ASSERT0(expr_is_const(init)); switch (init->const_expr.const_kind) { case CONST_INTEGER: @@ -669,7 +669,7 @@ static void header_gen_global_var(HeaderContext *c, Decl *decl, bool fn_globals, return; } header_print_type(c, decl->type); - assert(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST); + ASSERT0(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST); PRINTF("extern "); if (decl->var.kind == VARDECL_CONST) PRINTF("const "); PRINTF(" %s;\n", decl_get_extname(decl)); diff --git a/src/compiler/lexer.c b/src/compiler/lexer.c index 76e544595..1cabdd1ac 100644 --- a/src/compiler/lexer.c +++ b/src/compiler/lexer.c @@ -61,7 +61,7 @@ static inline void backtrack(Lexer *lexer) // Skip the x next characters. static inline void skip(Lexer *lexer, int steps) { - assert(steps > 0); + ASSERT0(steps > 0); for (int i = 0; i < steps; i++) { next(lexer); @@ -549,7 +549,7 @@ static inline bool scan_hex(Lexer *lexer) */ static inline bool scan_dec(Lexer *lexer) { - assert(char_is_digit(peek(lexer))); + ASSERT0(char_is_digit(peek(lexer))); // Walk through the digits, we don't need to worry about // initial _ because we only call this if we have a digit initially. 
@@ -753,7 +753,7 @@ static inline bool scan_char(Lexer *lexer) signed char escape = ' '; if (c == '\\') { - assert(c == '\\'); + ASSERT0(c == '\\'); c = peek(lexer); escape = char_is_valid_escape(c); if (escape == -1) @@ -823,7 +823,7 @@ static inline bool scan_char(Lexer *lexer) b = i128_shl64(b, 8); b = i128_add64(b, (unsigned char)c); } - assert(width > 0 && width <= 16); + ASSERT0(width > 0 && width <= 16); DONE: set_generic_token(lexer, TOKEN_CHAR_LITERAL); lexer->data.char_value = b; @@ -1239,7 +1239,7 @@ static bool lexer_scan_token_inner(Lexer *lexer) switch (c) { case '\n': - assert(lexer->mode == LEX_CONTRACTS); + ASSERT0(lexer->mode == LEX_CONTRACTS); return new_token(lexer, TOKEN_DOCS_EOL, ""); case '@': if (char_is_letter_(peek(lexer))) diff --git a/src/compiler/linker.c b/src/compiler/linker.c index 62200640e..baf82ba14 100644 --- a/src/compiler/linker.c +++ b/src/compiler/linker.c @@ -298,7 +298,7 @@ static const char *find_arch_glob_path(const char *glob_path, int file_len) && compiler.platform.arch != ARCH_TYPE_RISCV32 && strstr(path, "riscv")) continue; size_t len = strlen(path); - assert(len > file_len); + ASSERT0(len > file_len); const char *res = str_copy(path, len - file_len); globfree(&globbuf); return res; @@ -775,7 +775,7 @@ static char *assemble_linker_command(const char **args, bool extra_quote) { if (i != 0) scratch_buffer_append_char(' '); const char *arg = args[i]; - assert(arg != scratch_buffer.str && "Incorrectly passed a scratch buffer string as an argument."); + ASSERT0(arg != scratch_buffer.str && "Incorrectly passed a scratch buffer string as an argument."); if (arg == quote_arg) { scratch_buffer_append_cmd_argument(args[++i]); diff --git a/src/compiler/llvm_codegen.c b/src/compiler/llvm_codegen.c index b1bbe3258..8909afdec 100644 --- a/src/compiler/llvm_codegen.c +++ b/src/compiler/llvm_codegen.c @@ -58,7 +58,7 @@ bool module_should_weaken(Module *module) static void gencontext_init(GenContext *context, Module *module, LLVMContextRef shared_context) { - assert(LLVMIsMultithreaded()); + ASSERT0(LLVMIsMultithreaded()); memset(context, 0, sizeof(GenContext)); context->weaken = module_should_weaken(module); @@ -81,7 +81,7 @@ static void gencontext_init(GenContext *context, Module *module, LLVMContextRef static void gencontext_destroy(GenContext *context) { - assert(llvm_is_global_eval(context)); + ASSERT0(llvm_is_global_eval(context)); LLVMDisposeBuilder(context->global_builder); if (!context->shared_context) LLVMContextDispose(context->context); LLVMDisposeTargetData(context->target_data); @@ -229,25 +229,25 @@ static LLVMValueRef llvm_emit_const_array_padding(LLVMTypeRef element_type, Inde LLVMValueRef llvm_emit_const_initializer(GenContext *c, ConstInitializer *const_init) { - assert(const_init->type == type_flatten(const_init->type)); + ASSERT0(const_init->type == type_flatten(const_init->type)); switch (const_init->kind) { case CONST_INIT_ZERO: - assert(const_init->type->type_kind != TYPE_SLICE); + ASSERT0(const_init->type->type_kind != TYPE_SLICE); return llvm_get_zero(c, const_init->type); case CONST_INIT_ARRAY_VALUE: UNREACHABLE case CONST_INIT_ARRAY_FULL: { - assert(const_init->type->type_kind != TYPE_SLICE); + ASSERT0(const_init->type->type_kind != TYPE_SLICE); bool was_modified = false; Type *array_type = const_init->type; Type *element_type = array_type->array.base; LLVMTypeRef element_type_llvm = llvm_get_type(c, element_type); ConstInitializer **elements = const_init->init_array_full; - assert(array_type->type_kind == TYPE_ARRAY || 
array_type->type_kind == TYPE_VECTOR); + ASSERT0(array_type->type_kind == TYPE_ARRAY || array_type->type_kind == TYPE_VECTOR); ArraySize size = array_type->array.len; - assert(size > 0); + ASSERT0(size > 0); LLVMValueRef *parts = VECNEW(LLVMValueRef, size); for (ArrayIndex i = 0; i < (ArrayIndex)size; i++) { @@ -268,21 +268,21 @@ LLVMValueRef llvm_emit_const_initializer(GenContext *c, ConstInitializer *const_ case CONST_INIT_ARRAY: { - assert(const_init->type->type_kind != TYPE_SLICE); + ASSERT0(const_init->type->type_kind != TYPE_SLICE); bool was_modified = false; Type *array_type = const_init->type; Type *element_type = array_type->array.base; LLVMTypeRef element_type_llvm = llvm_get_type(c, element_type); AlignSize expected_align = llvm_abi_alignment(c, element_type_llvm); ConstInitializer **elements = const_init->init_array.elements; - assert(vec_size(elements) > 0 && "Array should always have gotten at least one element."); + ASSERT0(vec_size(elements) > 0 && "Array should always have gotten at least one element."); ArrayIndex current_index = 0; unsigned alignment = 0; LLVMValueRef *parts = NULL; bool pack = false; FOREACH(ConstInitializer *, element, elements) { - assert(element->kind == CONST_INIT_ARRAY_VALUE); + ASSERT0(element->kind == CONST_INIT_ARRAY_VALUE); ArrayIndex element_index = element->init_array_value.index; IndexDiff diff = element_index - current_index; if (alignment && expected_align != alignment) @@ -534,7 +534,7 @@ void llvm_set_private_declaration(LLVMValueRef alloc) void llvm_emit_global_variable_init(GenContext *c, Decl *decl) { - assert(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST || decl->var.is_static); + ASSERT0(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST || decl->var.is_static); // Skip real constants. 
if (!decl->type) return; @@ -558,7 +558,7 @@ void llvm_emit_global_variable_init(GenContext *c, Decl *decl) { if (expr_is_const_initializer(init_expr)) { - assert(type_flatten(init_expr->type)->type_kind != TYPE_SLICE); + ASSERT0(type_flatten(init_expr->type)->type_kind != TYPE_SLICE); ConstInitializer *list = init_expr->const_expr.initializer; init_value = llvm_emit_const_initializer(c, list); } @@ -624,7 +624,7 @@ void llvm_emit_global_variable_init(GenContext *c, Decl *decl) if (init_expr && IS_OPTIONAL(init_expr) && init_expr->expr_kind == EXPR_OPTIONAL) { Expr *inner = init_expr->inner_expr; - assert(expr_is_const(inner) && inner->const_expr.const_kind == CONST_ERR); + ASSERT0(expr_is_const(inner) && inner->const_expr.const_kind == CONST_ERR); BEValue value; llvm_emit_expr_global_value(c, &value, inner); optional_value = llvm_load_value_store(c, &value); @@ -653,7 +653,7 @@ void llvm_emit_global_variable_init(GenContext *c, Decl *decl) static void gencontext_verify_ir(GenContext *context) { char *error = NULL; - assert(context->module); + ASSERT0(context->module); if (LLVMVerifyModule(context->module, LLVMPrintMessageAction, &error)) { if (*error) @@ -721,12 +721,12 @@ void gencontext_print_llvm_ir(GenContext *context) LLVMValueRef llvm_emit_alloca(GenContext *c, LLVMTypeRef type, unsigned alignment, const char *name) { - assert(LLVMGetTypeKind(type) != LLVMVoidTypeKind); - assert(!llvm_is_global_eval(c)); - assert(alignment > 0); + ASSERT0(LLVMGetTypeKind(type) != LLVMVoidTypeKind); + ASSERT0(!llvm_is_global_eval(c)); + ASSERT0(alignment > 0); LLVMBasicBlockRef current_block = LLVMGetInsertBlock(c->builder); LLVMPositionBuilderBefore(c->builder, c->alloca_point); - assert(LLVMGetTypeContext(type) == c->context); + ASSERT0(LLVMGetTypeContext(type) == c->context); LLVMValueRef alloca = LLVMBuildAlloca(c->builder, type, name); llvm_set_alignment(alloca, alignment); LLVMPositionBuilderAtEnd(c->builder, current_block); @@ -742,13 +742,13 @@ void llvm_emit_and_set_decl_alloca(GenContext *c, Decl *decl) { Type *type = type_lowering(decl->type); if (type == type_void) return; - assert(!decl->backend_ref && !decl->is_value); + ASSERT0(!decl->backend_ref && !decl->is_value); decl->backend_ref = llvm_emit_alloca(c, llvm_get_type(c, type), decl->alignment, decl->name ? 
decl->name : ".anon"); } void llvm_emit_local_var_alloca(GenContext *c, Decl *decl) { - assert(!decl->var.is_static && decl->var.kind != VARDECL_CONST); + ASSERT0(!decl->var.is_static && decl->var.kind != VARDECL_CONST); llvm_emit_and_set_decl_alloca(c, decl); if (llvm_use_debug(c)) { @@ -1089,7 +1089,7 @@ const char *llvm_codegen(void *context) void llvm_add_global_decl(GenContext *c, Decl *decl) { - assert(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST); + ASSERT0(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST); bool same_module = decl->unit->module == c->code_module; LLVMTypeRef type = llvm_get_type(c, decl->type); @@ -1129,7 +1129,7 @@ LLVMValueRef llvm_get_opt_ref(GenContext *c, Decl *decl) static void llvm_emit_param_attributes(GenContext *c, LLVMValueRef function, ABIArgInfo *info, bool is_return, int index, int last_index, Decl *decl) { - assert(last_index == index || info->kind == ABI_ARG_DIRECT_PAIR || info->kind == ABI_ARG_IGNORE + ASSERT0(last_index == index || info->kind == ABI_ARG_DIRECT_PAIR || info->kind == ABI_ARG_IGNORE || info->kind == ABI_ARG_EXPAND || info->kind == ABI_ARG_DIRECT || info->kind == ABI_ARG_DIRECT_COERCE || info->kind == ABI_ARG_DIRECT_COERCE_INT || info->kind == ABI_ARG_EXPAND_COERCE || info->kind == ABI_ARG_DIRECT_SPLIT_STRUCT_I32); @@ -1137,13 +1137,13 @@ static void llvm_emit_param_attributes(GenContext *c, LLVMValueRef function, ABI if (info->attributes.zeroext) { // Direct only - assert(index == last_index); + ASSERT0(index == last_index); llvm_attribute_add(c, function, attribute_id.zext, index); } if (info->attributes.signext) { // Direct only - assert(index == last_index); + ASSERT0(index == last_index); llvm_attribute_add(c, function, attribute_id.sext, index); } if (info->attributes.by_reg) @@ -1169,7 +1169,7 @@ static void llvm_emit_param_attributes(GenContext *c, LLVMValueRef function, ABI case ABI_ARG_INDIRECT: if (is_return) { - assert(info->indirect.type); + ASSERT0(info->indirect.type); llvm_attribute_add_type(c, function, attribute_id.sret, llvm_get_type(c, info->indirect.type), 1); llvm_attribute_add(c, function, attribute_id.noalias, 1); llvm_attribute_add_int(c, function, attribute_id.align, info->indirect.alignment, 1); @@ -1266,7 +1266,7 @@ void llvm_append_function_attributes(GenContext *c, Decl *decl) } LLVMValueRef llvm_get_ref(GenContext *c, Decl *decl) { - assert(!decl->is_value); + ASSERT0(!decl->is_value); LLVMValueRef backend_ref = decl->backend_ref; if (backend_ref) { @@ -1282,7 +1282,7 @@ LLVMValueRef llvm_get_ref(GenContext *c, Decl *decl) { return decl->backend_ref = llvm_get_ref(c, decl->var.alias); } - assert(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST); + ASSERT0(decl->var.kind == VARDECL_GLOBAL || decl->var.kind == VARDECL_CONST); llvm_add_global_decl(c, decl); return decl->backend_ref; case DECL_FUNC: @@ -1310,7 +1310,7 @@ LLVMValueRef llvm_get_ref(GenContext *c, Decl *decl) { llvm_get_typeid(c, declptr(decl->enum_constant.parent)->type); } - assert(decl->backend_ref); + ASSERT0(decl->backend_ref); return decl->backend_ref; case DECL_POISONED: case DECL_ATTRIBUTE: @@ -1345,7 +1345,7 @@ static void llvm_gen_test_main(GenContext *c) { error_exit("No test runner found."); } - assert(!compiler.context.main && "Main should not be set if a test main is generated."); + ASSERT0(!compiler.context.main && "Main should not be set if a test main is generated."); compiler.context.main = test_runner; LLVMTypeRef cint = llvm_get_type(c, type_cint); LLVMTypeRef 
main_type = LLVMFunctionType(cint, NULL, 0, true); @@ -1442,7 +1442,7 @@ static void llvm_gen_benchmark_main(GenContext *c) { error_exit("No benchmark runner found."); } - assert(!compiler.context.main && "Main should not be set if a benchmark main is generated."); + ASSERT0(!compiler.context.main && "Main should not be set if a benchmark main is generated."); compiler.context.main = benchmark_runner; LLVMTypeRef cint = llvm_get_type(c, type_cint); LLVMTypeRef main_type = LLVMFunctionType(cint, NULL, 0, true); @@ -1616,7 +1616,7 @@ static GenContext *llvm_gen_module(Module *module, LLVMContextRef shared_context if (!vec_size(module->units)) return NULL; if (compiler.build.emit_stdlib == EMIT_STDLIB_OFF && module_is_stdlib(module)) return NULL; - assert(intrinsics_setup); + ASSERT0(intrinsics_setup); bool has_elements = false; GenContext *gen_context = cmalloc(sizeof(GenContext)); @@ -1818,7 +1818,7 @@ AlignSize llvm_abi_alignment(GenContext *c, LLVMTypeRef type) LLVMValueRef llvm_emit_memcpy(GenContext *c, LLVMValueRef dest, unsigned dest_align, LLVMValueRef source, unsigned src_align, uint64_t len) { - assert(dest_align && src_align); + ASSERT0(dest_align && src_align); if (len <= UINT32_MAX) { return LLVMBuildMemCpy(c->builder, dest, dest_align, source, src_align, llvm_const_int(c, type_uint, len)); diff --git a/src/compiler/llvm_codegen_builtins.c b/src/compiler/llvm_codegen_builtins.c index 74ce48ab2..4dbe68b9b 100644 --- a/src/compiler/llvm_codegen_builtins.c +++ b/src/compiler/llvm_codegen_builtins.c @@ -112,7 +112,7 @@ INLINE void llvm_emit_compare_exchange(GenContext *c, BEValue *result_value, Exp ordering_to_llvm(success_ordering), ordering_to_llvm(failure_ordering), false); if (alignment && alignment >= type_abi_alignment(type)) { - assert(is_power_of_two(alignment)); + ASSERT0(is_power_of_two(alignment)); LLVMSetAlignment(result, alignment); } if (is_volatile) LLVMSetVolatile(result, true); @@ -227,7 +227,7 @@ INLINE void llvm_emit_atomic_fetch(GenContext *c, BuiltinFunction func, BEValue uint64_t alignment = expr->call_expr.arguments[4]->const_expr.ixx.i.low; if (alignment) { - assert(is_power_of_two(alignment)); + ASSERT0(is_power_of_two(alignment)); LLVMSetAlignment(res, alignment); } llvm_value_set(result_value, res, result_value->type); @@ -270,7 +270,7 @@ static inline void llvm_syscall_write_regs_to_scratch(const char** registers, un static inline void llvm_emit_syscall(GenContext *c, BEValue *be_value, Expr *expr) { unsigned arguments = vec_size(expr->call_expr.arguments); - assert(arguments < 10 && "Only has room for 10"); + ASSERT0(arguments < 10 && "Only has room for 10"); LLVMValueRef arg_results[10]; LLVMTypeRef arg_types[10]; Expr **args = expr->call_expr.arguments; @@ -290,7 +290,7 @@ static inline void llvm_emit_syscall(GenContext *c, BEValue *be_value, Expr *exp case ARCH_TYPE_AARCH64: case ARCH_TYPE_AARCH64_BE: scratch_buffer_append("={x0}"); - assert(arguments < 8); + ASSERT0(arguments < 8); if (os_is_apple(compiler.platform.os)) { static char const *regs[] = { "x16", "x0", "x1", "x2", "x3", "x4", "x5" }; @@ -306,7 +306,7 @@ static inline void llvm_emit_syscall(GenContext *c, BEValue *be_value, Expr *exp case ARCH_TYPE_X86: { scratch_buffer_append("={eax}"); - assert(arguments < 8); + ASSERT0(arguments < 8); static char const *regs[] = { "eax", "ebx", "ecx", "edx", "esi", "edi" }; llvm_syscall_write_regs_to_scratch(regs, arguments < 6 ? 
arguments : 6); if (arguments == 7) @@ -321,7 +321,7 @@ static inline void llvm_emit_syscall(GenContext *c, BEValue *be_value, Expr *exp } case ARCH_TYPE_X86_64: scratch_buffer_append("={rax}"); - assert(arguments < 8); + ASSERT0(arguments < 8); { static char const *regs[] = { "rax", "rdi", "rsi", "rdx", "r10", "r8", "r9" }; llvm_syscall_write_regs_to_scratch(regs, arguments); @@ -379,8 +379,8 @@ INLINE void llvm_emit_memcpy_builtin(GenContext *c, unsigned intrinsic, BEValue call_type[0] = call_type[1] = c->ptr_type; call_type[2] = c->size_type; LLVMValueRef result = llvm_emit_call_intrinsic(c, intrinsic, call_type, 3, arg_slots, 4); - assert(args[4]->const_expr.const_kind == CONST_INTEGER); - assert(args[5]->const_expr.const_kind == CONST_INTEGER); + ASSERT0(args[4]->const_expr.const_kind == CONST_INTEGER); + ASSERT0(args[5]->const_expr.const_kind == CONST_INTEGER); uint64_t dst_align = int_to_u64(args[4]->const_expr.ixx); uint64_t src_align = int_to_u64(args[5]->const_expr.ixx); if (dst_align > 0) llvm_attribute_add_call(c, result, attribute_id.align, 1, dst_align); @@ -397,8 +397,8 @@ INLINE void llvm_emit_memmove_builtin(GenContext *c, BEValue *be_value, Expr *ex call_type[0] = call_type[1] = c->ptr_type; call_type[2] = c->size_type; LLVMValueRef result = llvm_emit_call_intrinsic(c, intrinsic_id.memmove, call_type, 3, arg_slots, 4); - assert(args[4]->const_expr.const_kind == CONST_INTEGER); - assert(args[5]->const_expr.const_kind == CONST_INTEGER); + ASSERT0(args[4]->const_expr.const_kind == CONST_INTEGER); + ASSERT0(args[5]->const_expr.const_kind == CONST_INTEGER); uint64_t dst_align = int_to_u64(args[4]->const_expr.ixx); uint64_t src_align = int_to_u64(args[5]->const_expr.ixx); if (dst_align > 0) llvm_attribute_add_call(c, result, attribute_id.align, 1, dst_align); @@ -413,7 +413,7 @@ INLINE void llvm_emit_memset_builtin(GenContext *c, unsigned intrinsic, BEValue llvm_emit_intrinsic_args(c, args, arg_slots, 4); LLVMTypeRef call_type[2] = { c->ptr_type, c->size_type }; LLVMValueRef result = llvm_emit_call_intrinsic(c, intrinsic, call_type, 2, arg_slots, 4); - assert(args[4]->const_expr.const_kind == CONST_INTEGER); + ASSERT0(args[4]->const_expr.const_kind == CONST_INTEGER); uint64_t dst_align = int_to_u64(args[4]->const_expr.ixx); if (dst_align > 0) llvm_attribute_add_call(c, result, attribute_id.align, 1, dst_align); llvm_value_set(be_value, result, type_void); @@ -475,7 +475,7 @@ void llvm_emit_3_variant_builtin(GenContext *c, BEValue *be_value, Expr *expr, u { Expr **args = expr->call_expr.arguments; unsigned count = vec_size(args); - assert(count <= 3); + ASSERT0(count <= 3); LLVMValueRef arg_slots[3]; unsigned intrinsic = llvm_intrinsic_by_type(args[0]->type, sid, uid, fid); llvm_emit_intrinsic_args(c, args, arg_slots, count); @@ -508,8 +508,8 @@ void llvm_emit_simple_builtin(GenContext *c, BEValue *be_value, Expr *expr, unsi { Expr **args = expr->call_expr.arguments; unsigned count = vec_size(args); - assert(count <= 4); - assert(count > 0); + ASSERT0(count <= 4); + ASSERT0(count > 0); LLVMValueRef arg_slots[4]; llvm_emit_intrinsic_args(c, args, arg_slots, count); LLVMTypeRef call_type = LLVMTypeOf(arg_slots[0]); @@ -520,7 +520,7 @@ void llvm_emit_simple_builtin(GenContext *c, BEValue *be_value, Expr *expr, unsi static void llvm_emit_masked_load(GenContext *c, BEValue *be_value, Expr *expr) { Expr **args = expr->call_expr.arguments; - assert(vec_size(args) == 4); + ASSERT0(vec_size(args) == 4); LLVMValueRef arg_slots[4]; llvm_emit_intrinsic_args(c, args, arg_slots, 3); // 
Rearrange to match our builtin with the intrinsic which is ptr, align, mask, passthru @@ -538,7 +538,7 @@ static void llvm_emit_masked_load(GenContext *c, BEValue *be_value, Expr *expr) static void llvm_emit_gather(GenContext *c, BEValue *be_value, Expr *expr) { Expr **args = expr->call_expr.arguments; - assert(vec_size(args) == 4); + ASSERT0(vec_size(args) == 4); LLVMValueRef arg_slots[4]; llvm_emit_intrinsic_args(c, args, arg_slots, 3); // Rearrange to match our builtin with the intrinsic which is ptr, align, mask, passthru @@ -556,7 +556,7 @@ static void llvm_emit_gather(GenContext *c, BEValue *be_value, Expr *expr) static void llvm_emit_masked_store(GenContext *c, BEValue *be_value, Expr *expr) { Expr **args = expr->call_expr.arguments; - assert(vec_size(args) == 4); + ASSERT0(vec_size(args) == 4); LLVMValueRef arg_slots[4]; llvm_emit_intrinsic_args(c, args, arg_slots, 3); // Rearrange to match our builtin with the intrinsic which is value, ptr, align, mask @@ -576,7 +576,7 @@ static void llvm_emit_masked_store(GenContext *c, BEValue *be_value, Expr *expr) static void llvm_emit_scatter(GenContext *c, BEValue *be_value, Expr *expr) { Expr **args = expr->call_expr.arguments; - assert(vec_size(args) == 4); + ASSERT0(vec_size(args) == 4); LLVMValueRef arg_slots[4]; llvm_emit_intrinsic_args(c, args, arg_slots, 3); // Rearrange to match our builtin with the intrinsic which is value, ptr, align, mask @@ -598,8 +598,8 @@ void llvm_emit_builtin_args_types3(GenContext *c, BEValue *be_value, Expr *expr, { Expr **args = expr->call_expr.arguments; unsigned count = vec_size(args); - assert(count <= 3); - assert(count > 0); + ASSERT0(count <= 3); + ASSERT0(count > 0); LLVMValueRef arg_slots[3]; llvm_emit_intrinsic_args(c, args, arg_slots, count); LLVMTypeRef call_type[3]; @@ -638,7 +638,7 @@ static void llvm_emit_wrap_builtin(GenContext *c, BEValue *result_value, Expr *e llvm_emit_intrinsic_args(c, args, arg_slots, func == BUILTIN_EXACT_NEG ? 
1 : 2); Type *base_type = type_lowering(args[0]->type); if (base_type->type_kind == TYPE_VECTOR) base_type = base_type->array.base; - assert(type_is_integer(base_type)); + ASSERT0(type_is_integer(base_type)); bool is_signed = type_is_signed(base_type); LLVMValueRef res; switch (func) @@ -686,7 +686,7 @@ static void llvm_emit_veccomp(GenContext *c, BEValue *value, Expr *expr, Builtin Expr **args = expr->call_expr.arguments; unsigned count = vec_size(args); (void)count; - assert(count == 2); + ASSERT0(count == 2); LLVMValueRef mask; llvm_emit_expr(c, value, args[0]); @@ -776,7 +776,7 @@ static inline void llvm_emit_any_make(GenContext *c, BEValue *value, Expr *expr) LLVMValueRef var = llvm_get_undef(c, type_any); var = llvm_emit_insert_value(c, var, ptr.value, 0); var = llvm_emit_insert_value(c, var, typeid_value.value, 1); - assert(!LLVMIsConstant(ptr.value) || !LLVMIsConstant(typeid_value.value) || LLVMIsConstant(var)); + ASSERT0(!LLVMIsConstant(ptr.value) || !LLVMIsConstant(typeid_value.value) || LLVMIsConstant(var)); llvm_value_set(value, var, type_any); } diff --git a/src/compiler/llvm_codegen_debug_info.c b/src/compiler/llvm_codegen_debug_info.c index 9a49d9bca..c90c6babc 100644 --- a/src/compiler/llvm_codegen_debug_info.c +++ b/src/compiler/llvm_codegen_debug_info.c @@ -60,7 +60,7 @@ static inline LLVMMetadataRef llvm_get_debug_struct(GenContext *c, Type *type, c static inline LLVMMetadataRef llvm_get_debug_member(GenContext *c, Type *type, const char *name, unsigned offset, SourceSpan *loc, LLVMMetadataRef scope, LLVMDIFlags flags) { - assert(name && scope); + ASSERT0(name && scope); return LLVMDIBuilderCreateMemberType( c->debug.builder, scope, @@ -89,8 +89,8 @@ void llvm_emit_debug_function(GenContext *c, Decl *decl) uint32_t row = decl->span.row; if (!row) row = 1; - assert(decl->name); - assert(c->debug.file.debug_file); + ASSERT0(decl->name); + ASSERT0(c->debug.file.debug_file); LLVMMetadataRef debug_type = llvm_get_debug_type(c, decl->type); scratch_buffer_set_extern_decl_name(decl, true); c->debug.function = LLVMDIBuilderCreateFunction(c->debug.builder, @@ -144,7 +144,7 @@ static void llvm_emit_debug_declare(GenContext *c, LLVMValueRef var, LLVMMetadat void llvm_emit_debug_local_var(GenContext *c, Decl *decl) { - assert(llvm_is_local_eval(c)); + ASSERT0(llvm_is_local_eval(c)); EMIT_LOC(c, decl); uint32_t row = decl->span.row; uint32_t col = decl->span.col; @@ -166,7 +166,7 @@ void llvm_emit_debug_local_var(GenContext *c, Decl *decl) decl->alignment); decl->var.backend_debug_ref = var; - assert(!decl->is_value); + ASSERT0(!decl->is_value); llvm_emit_debug_declare(c, decl->backend_ref, var, row, col, scope); } @@ -178,7 +178,7 @@ void llvm_emit_debug_local_var(GenContext *c, Decl *decl) */ void llvm_emit_debug_parameter(GenContext *c, Decl *parameter, unsigned index) { - assert(!llvm_is_global_eval(c)); + ASSERT0(!llvm_is_global_eval(c)); const char *name = parameter->name ? 
parameter->name : ".anon"; bool always_preserve = false; diff --git a/src/compiler/llvm_codegen_expr.c b/src/compiler/llvm_codegen_expr.c index 09c88c4e5..bca2d8355 100644 --- a/src/compiler/llvm_codegen_expr.c +++ b/src/compiler/llvm_codegen_expr.c @@ -76,14 +76,14 @@ LLVMValueRef llvm_emit_exprid_to_rvalue(GenContext *c, ExprId expr_id) void llvm_emit_assume_true(GenContext *c, BEValue *assume_true) { - assert(llvm_value_is_bool(assume_true)); + ASSERT0(llvm_value_is_bool(assume_true)); LLVMValueRef value = assume_true->value; llvm_emit_call_intrinsic(c, intrinsic_id.assume, NULL, 0, &value, 1); } LLVMValueRef llvm_emit_expect_false(GenContext *c, BEValue *expect_false) { - assert(llvm_value_is_bool(expect_false)); + ASSERT0(llvm_value_is_bool(expect_false)); LLVMValueRef values[2] = { expect_false->value, LLVMConstNull(c->bool_type) }; return llvm_emit_call_intrinsic(c, intrinsic_id.expect, &c->bool_type, 1, values, 2); } @@ -96,7 +96,7 @@ LLVMValueRef llvm_emit_expect_raw(GenContext *c, LLVMValueRef expect_true) BEValue llvm_emit_assign_expr(GenContext *c, BEValue *ref, Expr *expr, LLVMValueRef optional, bool is_init) { - assert(llvm_value_is_addr(ref)); + ASSERT0(llvm_value_is_addr(ref)); assert((optional || !IS_OPTIONAL(expr)) && "Assumed an optional address if it's an optional expression."); @@ -133,7 +133,7 @@ BEValue llvm_emit_assign_expr(GenContext *c, BEValue *ref, Expr *expr, LLVMValue if (IS_OPTIONAL(expr)) { assign_block = llvm_basic_block_new(c, "after_assign"); - assert(optional); + ASSERT0(optional); if (c->catch.fault) { c->catch.block = rejump_block = llvm_basic_block_new(c, "optional_assign_jump"); @@ -322,7 +322,7 @@ static inline LLVMValueRef llvm_mask_low_bits(GenContext *c, LLVMValueRef value, */ LLVMTypeRef llvm_const_padding_type(GenContext *c, AlignSize size) { - assert(size > 0); + ASSERT0(size > 0); if (size == 1) return c->byte_type; return LLVMArrayType(c->byte_type, (unsigned)size); } @@ -345,7 +345,7 @@ static inline LLVMValueRef llvm_emit_add_int(GenContext *c, Type *type, LLVMValu { LLVMTypeRef type_to_use = llvm_get_type(c, type->canonical); LLVMValueRef args[2] = { left, right }; - assert(type->canonical == type); + ASSERT0(type->canonical == type); LLVMValueRef add_res; if (type_is_unsigned(type)) { @@ -419,7 +419,7 @@ LLVMValueRef llvm_coerce_int_ptr(GenContext *c, LLVMValueRef value, LLVMTypeRef bool to_is_pointer = LLVMGetTypeKind(to) == LLVMPointerTypeKind; if (LLVMGetTypeKind(from) == LLVMPointerTypeKind) { - assert(!to_is_pointer && "ptr<->ptr should never happen in LLVM 15+"); + ASSERT0(!to_is_pointer && "ptr<->ptr should never happen in LLVM 15+"); from = llvm_get_type(c, type_iptr); value = LLVMBuildPtrToInt(c->builder, value, from, ""); } @@ -550,7 +550,7 @@ void llvm_emit_coerce_store(GenContext *c, LLVMValueRef addr, AlignSize alignmen return; } - assert(coerced_type_kind != LLVMScalableVectorTypeKind && "Scalable vectors are not supported."); + ASSERT0(coerced_type_kind != LLVMScalableVectorTypeKind && "Scalable vectors are not supported."); ByteSize target_size = llvm_alloc_size(c, target_type); if (src_size <= target_size && coerced_type_kind != LLVMScalableVectorTypeKind && source_type_kind != LLVMScalableVectorTypeKind) @@ -581,7 +581,7 @@ static inline LLVMValueRef llvm_emit_sub_int(GenContext *c, Type *type, LLVMValu { LLVMTypeRef type_to_use = llvm_get_type(c, type); LLVMValueRef args[2] = { left, right }; - assert(type_lowering(type) == type); + ASSERT0(type_lowering(type) == type); LLVMValueRef add_res; if (type_is_unsigned(type)) 
{ @@ -624,7 +624,7 @@ static void llvm_emit_array_bounds_check(GenContext *c, BEValue *index, LLVMValu static inline void llvm_emit_subscript_addr_with_base(GenContext *c, BEValue *result, BEValue *parent, BEValue *index, SourceSpan loc) { - assert(llvm_value_is_addr(parent)); + ASSERT0(llvm_value_is_addr(parent)); Type *type = type_lowering(parent->type); switch (type->type_kind) { @@ -661,7 +661,7 @@ static inline void llvm_emit_vector_subscript(GenContext *c, BEValue *value, Exp llvm_emit_exprid(c, value, expr->subscript_expr.expr); llvm_value_rvalue(c, value); Type *vec = value->type; - assert(vec->type_kind == TYPE_VECTOR); + ASSERT0(vec->type_kind == TYPE_VECTOR); Type *element = vec->array.base; LLVMValueRef vector = value->value; llvm_emit_exprid(c, value, expr->subscript_expr.index.expr); @@ -700,7 +700,7 @@ static inline void llvm_emit_subscript_addr(GenContext *c, BEValue *value, Expr else if (parent_type_kind == TYPE_ARRAY) { // From back should always be folded. - assert(!expr_is_const(expr) || !start_from_end); + ASSERT0(!expr_is_const(expr) || !start_from_end); needs_len = (safe_mode_enabled() && !expr_is_const(expr)) || start_from_end; } if (needs_len) @@ -719,7 +719,7 @@ static inline void llvm_emit_subscript_addr(GenContext *c, BEValue *value, Expr if (start_from_end) { - assert(needs_len); + ASSERT0(needs_len); index.value = LLVMBuildNUWSub(c->builder, llvm_zext_trunc(c, len.value, llvm_get_type(c, index.type)), index.value, ""); } if (needs_len && safe_mode_enabled() && !llvm_is_global_eval(c)) @@ -727,7 +727,7 @@ static inline void llvm_emit_subscript_addr(GenContext *c, BEValue *value, Expr llvm_emit_array_bounds_check(c, &index, len.value, index_expr->span); } llvm_emit_subscript_addr_with_base(c, value, value, &index, index_expr->span); - assert(llvm_value_is_addr(value)); + ASSERT0(llvm_value_is_addr(value)); llvm_value_fold_optional(c, value); } @@ -793,13 +793,13 @@ static ArrayIndex find_member_index(Decl *parent, Decl *member) static void llvm_emit_member_addr(GenContext *c, BEValue *value, Decl *parent, Decl *member) { - assert(member->resolve_status == RESOLVE_DONE); + ASSERT0(member->resolve_status == RESOLVE_DONE); Decl *found = NULL; do { ArrayIndex index = find_member_index(parent, member); - assert(index > -1); + ASSERT0(index > -1); found = parent->strukt.members[index]; switch (parent->type->canonical->type_kind) { @@ -820,13 +820,13 @@ static void llvm_emit_member_addr(GenContext *c, BEValue *value, Decl *parent, D static Decl *llvm_emit_bitstruct_member(GenContext *c, BEValue *value, Decl *parent, Decl *member) { - assert(member->resolve_status == RESOLVE_DONE); + ASSERT0(member->resolve_status == RESOLVE_DONE); Decl *found = parent; Decl *last = NULL; do { ArrayIndex index = find_member_index(parent, member); - assert(index > -1); + ASSERT0(index > -1); last = found; found = parent->strukt.members[index]; switch (parent->type->canonical->type_kind) @@ -855,7 +855,7 @@ static LLVMValueRef llvm_emit_bswap(GenContext *c, LLVMValueRef value) return LLVMConstBswap(value); } LLVMTypeRef type = LLVMTypeOf(value); - assert(type != c->byte_type); + ASSERT0(type != c->byte_type); return llvm_emit_call_intrinsic(c, intrinsic_id.bswap, &type, 1, &value, 1); } @@ -889,7 +889,7 @@ static inline void llvm_extract_bool_bit_from_array(GenContext *c, BEValue *be_v static inline LLVMValueRef llvm_bswap_non_integral(GenContext *c, LLVMValueRef value, unsigned bitsize) { if (bitsize <= 8) return value; - assert(is_power_of_two(bitsize)); + 
ASSERT0(is_power_of_two(bitsize)); LLVMValueRef shifted = llvm_emit_shl_fixed(c, value, (int)llvm_bitsize(c, LLVMTypeOf(value)) - (int)bitsize); return llvm_emit_bswap(c, shifted); } @@ -1018,7 +1018,7 @@ static inline void llvm_emit_update_bitstruct_array(GenContext *c, Type *member_type = type_flatten(member->type); if (member_type == type_bool) { - assert(start_bit == end_bit); + ASSERT0(start_bit == end_bit); value = llvm_emit_shl_fixed(c, value, start_bit % 8); AlignSize alignment; LLVMValueRef byte_ptr = llvm_emit_array_gep_raw(c, array_ptr, array_type, start_bit / 8, array_alignment, &alignment); @@ -1035,7 +1035,7 @@ static inline void llvm_emit_update_bitstruct_array(GenContext *c, { value = llvm_bswap_non_integral(c, value, bit_size); } - assert(bit_size > 0 && bit_size <= 128); + ASSERT0(bit_size > 0 && bit_size <= 128); int start_byte = start_bit / 8; int end_byte = end_bit / 8; int start_mod = start_bit % 8; @@ -1183,7 +1183,7 @@ static inline void llvm_emit_bitaccess(GenContext *c, BEValue *be_value, Expr *e llvm_emit_expr(c, be_value, parent); Decl *member = expr->access_expr.ref; - assert(be_value && be_value->type); + ASSERT0(be_value && be_value->type); Decl *parent_decl = llvm_emit_bitstruct_member(c, be_value, type_flatten(parent->type)->decl, member); llvm_extract_bitvalue(c, be_value, parent_decl, expr->access_expr.ref); @@ -1200,7 +1200,7 @@ static inline void llvm_emit_access_addr(GenContext *c, BEValue *be_value, Expr llvm_value_rvalue(c, be_value); if (!flat_type->decl->backend_ref) llvm_get_typeid(c, parent->type); - assert(member->backend_ref); + ASSERT0(member->backend_ref); LLVMTypeRef value_type = llvm_get_type(c, type_get_array(member->type, vec_size(flat_type->decl->enums.values))); AlignSize align = LLVMGetAlignment(member->backend_ref); AlignSize alignment; @@ -1419,7 +1419,7 @@ static LLVMValueRef llvm_emit_char_array_zero(GenContext *c, BEValue *value, boo { llvm_value_addr(c, value); unsigned len = type_size(value->type); - assert(len > 0); + ASSERT0(len > 0); LLVMValueRef total = NULL; for (int i = 0; i < len; i++) { @@ -1522,7 +1522,7 @@ void llvm_emit_cast(GenContext *c, CastKind cast_kind, Expr *expr, BEValue *valu break; case CAST_EREU: // This is a no op. 
- assert(type_lowering(to_type) == type_lowering(from_type)); + ASSERT0(type_lowering(to_type) == type_lowering(from_type)); break; case CAST_VECARR: llvm_emit_vector_to_array_cast(c, value, to_type, from_type); @@ -1763,7 +1763,7 @@ static inline void llvm_emit_const_initialize_bitstruct_ref(GenContext *c, BEVal llvm_store_zero(c, ref); return; } - assert(initializer->kind == CONST_INIT_STRUCT); + ASSERT0(initializer->kind == CONST_INIT_STRUCT); llvm_store_raw(c, ref, llvm_emit_const_bitstruct(c, initializer)); } @@ -1818,7 +1818,7 @@ static void llvm_emit_const_init_ref(GenContext *c, BEValue *ref, ConstInitializ Type *element_type = array_type->array.base; ArrayIndex size = (ArrayIndex)array_type->array.len; LLVMTypeRef array_type_llvm = llvm_get_type(c, array_type); - assert(size <= UINT32_MAX); + ASSERT0(size <= UINT32_MAX); for (ArrayIndex i = 0; i < size; i++) { AlignSize alignment; @@ -1841,7 +1841,7 @@ static void llvm_emit_const_init_ref(GenContext *c, BEValue *ref, ConstInitializ LLVMValueRef *parts = NULL; FOREACH(ConstInitializer *, element, elements) { - assert(element->kind == CONST_INIT_ARRAY_VALUE); + ASSERT0(element->kind == CONST_INIT_ARRAY_VALUE); ArrayIndex element_index = element->init_array_value.index; AlignSize alignment; LLVMValueRef array_pointer = llvm_emit_array_gep_raw(c, array_ref, array_type_llvm, (unsigned)element_index, ref->alignment, &alignment); @@ -1867,7 +1867,7 @@ static void llvm_emit_const_init_ref(GenContext *c, BEValue *ref, ConstInitializ case CONST_INIT_STRUCT: { Decl *decl = const_init->type->decl; - assert(vec_size(decl->strukt.members) == vec_size(const_init->init_struct)); + ASSERT0(vec_size(decl->strukt.members) == vec_size(const_init->init_struct)); FOREACH_IDX(i, ConstInitializer *, init, const_init->init_struct) { BEValue value; @@ -1952,7 +1952,7 @@ static inline void llvm_emit_initialize_reference_list(GenContext *c, BEValue *r { Type *type = type_flatten(expr->type); Expr **elements = expr->initializer_list; - assert(type->type_kind != TYPE_SLICE); + ASSERT0(type->type_kind != TYPE_SLICE); if (type->type_kind == TYPE_BITSTRUCT) { @@ -2018,7 +2018,7 @@ static void llvm_emit_initialize_designated_const_range(GenContext *c, BEValue * DesignatorElement *curr = current[0]; llvm_value_addr(c, ref); - assert(curr->kind == DESIGNATOR_RANGE); + ASSERT0(curr->kind == DESIGNATOR_RANGE); BEValue emitted_local; if (!emitted_value) @@ -2085,11 +2085,11 @@ static void llvm_emit_initialize_designated_element(GenContext *c, BEValue *ref, } if (type->type_kind == TYPE_BITSTRUCT && last == current + 1) { - assert(llvm_value_is_addr(&value)); + ASSERT0(llvm_value_is_addr(&value)); Decl *member = type->decl->strukt.members[last[0]->index]; // Special handling of bitstructs. Type *underlying_type = value.type; - assert(!emitted_value); + ASSERT0(!emitted_value); BEValue exprval; llvm_emit_expr(c, &exprval, expr); LLVMValueRef val = llvm_load_value_store(c, &exprval); @@ -2137,9 +2137,9 @@ static inline void llvm_emit_initialize_reference_designated_bitstruct_array(Gen // Now walk through the elements. 
FOREACH(Expr *, designator, elements) { - assert(vec_size(designator->designator_expr.path) == 1); + ASSERT0(vec_size(designator->designator_expr.path) == 1); DesignatorElement *element = designator->designator_expr.path[0]; - assert(element->kind == DESIGNATOR_FIELD); + ASSERT0(element->kind == DESIGNATOR_FIELD); Decl *member = bitstruct->strukt.members[element->index]; BEValue val; llvm_emit_expr(c, &val, designator->designator_expr.value); @@ -2162,9 +2162,9 @@ static inline void llvm_emit_initialize_reference_designated_bitstruct(GenContex // Now walk through the elements. FOREACH(Expr *, designator, elements) { - assert(vec_size(designator->designator_expr.path) == 1); + ASSERT0(vec_size(designator->designator_expr.path) == 1); DesignatorElement *element = designator->designator_expr.path[0]; - assert(element->kind == DESIGNATOR_FIELD); + ASSERT0(element->kind == DESIGNATOR_FIELD); Decl *member = bitstruct->strukt.members[element->index]; BEValue val; llvm_emit_expr(c, &val, designator->designator_expr.value); @@ -2180,9 +2180,9 @@ static inline void llvm_emit_initialize_reference_designated_bitstruct(GenContex static inline void llvm_emit_initialize_reference_designated(GenContext *c, BEValue *ref, Expr *expr) { Expr **elements = expr->designated_init_list; - assert(vec_size(elements)); + ASSERT0(vec_size(elements)); Type *type = type_flatten(expr->type); - assert(type->type_kind != TYPE_SLICE); + ASSERT0(type->type_kind != TYPE_SLICE); if (type->type_kind == TYPE_BITSTRUCT) { llvm_emit_initialize_reference_designated_bitstruct(c, ref, type->decl, elements); @@ -2207,7 +2207,7 @@ static inline void llvm_emit_initialize_reference_designated(GenContext *c, BEVa static bool bitstruct_requires_bitswap(Decl *decl) { - assert(decl->decl_kind == DECL_BITSTRUCT); + ASSERT0(decl->decl_kind == DECL_BITSTRUCT); bool big_endian = compiler.platform.big_endian; if (decl->strukt.big_endian) return !big_endian; if (decl->strukt.little_endian) return big_endian; @@ -2226,7 +2226,7 @@ LLVMValueRef llvm_emit_const_bitstruct_array(GenContext *c, ConstInitializer *in slots[i] = llvm_get_zero_raw(c->byte_type); } Decl **members = decl->strukt.members; - assert(vec_size(members) == vec_size(initializer->init_struct)); + ASSERT0(vec_size(members) == vec_size(initializer->init_struct)); FOREACH_IDX(i, ConstInitializer *, init, initializer->init_struct) { Decl *member = members[i]; @@ -2234,14 +2234,14 @@ LLVMValueRef llvm_emit_const_bitstruct_array(GenContext *c, ConstInitializer *in unsigned end_bit = member->var.end_bit; Type *member_type = type_flatten(member->type); if (init->kind == CONST_INIT_ZERO) continue; - assert(init->kind == CONST_INIT_VALUE); + ASSERT0(init->kind == CONST_INIT_VALUE); Expr *expr = init->init_value; // Special case for bool if (member_type == type_bool) { - assert(expr_is_const_bool(expr)); - assert(start_bit == end_bit); + ASSERT0(expr_is_const_bool(expr)); + ASSERT0(start_bit == end_bit); // Completely skip zero. 
if (!expr->const_expr.b) continue; @@ -2253,10 +2253,10 @@ LLVMValueRef llvm_emit_const_bitstruct_array(GenContext *c, ConstInitializer *in continue; } unsigned bit_size = end_bit - start_bit + 1; - assert(bit_size > 0 && bit_size <= 128); + ASSERT0(bit_size > 0 && bit_size <= 128); BEValue val; llvm_emit_const_expr(c, &val, init->init_value); - assert(val.kind == BE_VALUE); + ASSERT0(val.kind == BE_VALUE); LLVMValueRef value = val.value; int start_byte = start_bit / 8; int end_byte = end_bit / 8; @@ -2305,14 +2305,14 @@ LLVMValueRef llvm_emit_const_bitstruct(GenContext *c, ConstInitializer *initiali Decl **members = decl->strukt.members; TypeSize base_type_size = type_size(base_type); TypeSize base_type_bitsize = base_type_size * 8; - assert(vec_size(members) == vec_size(initializer->init_struct)); + ASSERT0(vec_size(members) == vec_size(initializer->init_struct)); FOREACH_IDX(i, ConstInitializer *, val, initializer->init_struct) { Decl *member = members[i]; unsigned start_bit = member->var.start_bit; unsigned end_bit = member->var.end_bit; unsigned bit_size = end_bit - start_bit + 1; - assert(bit_size > 0 && bit_size <= 128); + ASSERT0(bit_size > 0 && bit_size <= 128); LLVMValueRef value; if (val->kind == CONST_INIT_ZERO) { @@ -2321,7 +2321,7 @@ LLVMValueRef llvm_emit_const_bitstruct(GenContext *c, ConstInitializer *initiali else { BEValue entry; - assert(val->kind == CONST_INIT_VALUE); + ASSERT0(val->kind == CONST_INIT_VALUE); llvm_emit_const_expr(c, &entry, val->init_value); value = llvm_load_value_store(c, &entry); } @@ -2350,8 +2350,8 @@ LLVMValueRef llvm_emit_const_bitstruct(GenContext *c, ConstInitializer *initiali */ static inline void llvm_emit_const_initialize_reference(GenContext *c, BEValue *ref, Expr *expr) { - assert(expr_is_const_initializer(expr)); - assert(type_flatten(expr->type)->type_kind != TYPE_SLICE); + ASSERT0(expr_is_const_initializer(expr)); + ASSERT0(type_flatten(expr->type)->type_kind != TYPE_SLICE); llvm_emit_const_init_ref(c, ref, expr->const_expr.initializer, true); return; } @@ -2382,7 +2382,7 @@ static inline void llvm_emit_initialize_reference(GenContext *c, BEValue *ref, E static inline LLVMValueRef llvm_emit_inc_dec_value(GenContext *c, SourceSpan span, BEValue *original, int diff, bool allow_wrap) { - assert(!llvm_value_is_addr(original)); + ASSERT0(!llvm_value_is_addr(original)); Type *type = original->type; switch (type->type_kind) @@ -2530,7 +2530,7 @@ static inline void llvm_emit_pre_post_inc_dec_vector(GenContext *c, BEValue *val // But we also want the value (of the full vector) llvm_value_rvalue(c, value); Type *vec = value->type; - assert(vec->type_kind == TYPE_VECTOR); + ASSERT0(vec->type_kind == TYPE_VECTOR); Type *element = vec->array.base; LLVMValueRef vector = value->value; @@ -2770,7 +2770,7 @@ static void llvm_emit_unary_expr(GenContext *c, BEValue *value, Expr *expr) value->value = LLVMBuildFNeg(c->builder, value->value, "fneg"); return; } - assert(type->canonical != type_bool); + ASSERT0(type->canonical != type_bool); if (compiler.build.feature.trap_on_wrap && !type_flat_is_vector(value->type)) { LLVMValueRef zero = llvm_get_zero(c, expr->unary_expr.expr->type); @@ -2860,7 +2860,7 @@ static void llvm_emit_trap_zero(GenContext *c, Type *type, LLVMValueRef value, c { if (!safe_mode_enabled()) return; - assert(type == type_flatten(type)); + ASSERT0(type == type_flatten(type)); if (type_flat_is_vector(type)) { @@ -2934,7 +2934,7 @@ static void llvm_emit_trap_invalid_shift(GenContext *c, LLVMValueRef value, Type static void 
llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_ref, BEValue *start_ref, BEValue *end_ref, bool *is_exclusive) { - assert(slice->expr_kind == EXPR_SLICE); + ASSERT0(slice->expr_kind == EXPR_SLICE); Expr *parent_expr = exprptr(slice->subscript_expr.expr); @@ -3000,7 +3000,7 @@ static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r check_end = false; break; case TYPE_SLICE: - assert(parent_load_value); + ASSERT0(parent_load_value); llvm_value_set(&len, llvm_emit_extract_value(c, parent_load_value, 1), start_type); break; case TYPE_ARRAY: @@ -3021,7 +3021,7 @@ static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r // Check that index does not extend beyond the length. if (check_end && safe_mode_enabled()) { - assert(len.value); + ASSERT0(len.value); BEValue exceeds_size; llvm_emit_int_comp(c, &exceeds_size, &start_index, &len, BINARYOP_GT); llvm_emit_panic_if_true(c, &exceeds_size, "Index exceeds array len", slice->span, "Index exceeds array length (array had size %d, index was %d).", &len, &start_index); @@ -3047,11 +3047,11 @@ static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r end_type = end_index.type; break; case RANGE_CONST_LEN: - assert(range.is_len); + ASSERT0(range.is_len); llvm_value_set_int(c, &end_index, end_type, range.const_end); break; case RANGE_CONST_END: - assert(!range.is_len); + ASSERT0(!range.is_len); llvm_value_set_int(c, &end_index, end_type, range.const_end); break; case RANGE_CONST_RANGE: @@ -3062,7 +3062,7 @@ static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r // Reverse if it is "from back" if (end_from_end) { - assert(range.range_type == RANGE_DYNAMIC); + ASSERT0(range.range_type == RANGE_DYNAMIC); end_index.value = llvm_emit_sub_int(c, end_index.type, len.value, end_index.value, slice->span); llvm_value_rvalue(c, &end_index); } @@ -3101,7 +3101,7 @@ static void llvm_emit_slice_values(GenContext *c, Expr *slice, BEValue *parent_r } else { - assert(len.value && "Pointer should never end up here."); + ASSERT0(len.value && "Pointer should never end up here."); end_index.value = len.value; end_type = start_type; // Use "len-range" when implicit, this avoids len - 1 here. @@ -3209,7 +3209,7 @@ static void llvm_emit_slice_assign(GenContext *c, BEValue *be_value, Expr *expr) // First, find the value assigned. Expr *assigned_value = exprptr(expr->slice_assign_expr.right); llvm_emit_expr(c, be_value, assigned_value); - assert(!IS_OPTIONAL(assigned_value)); + ASSERT0(!IS_OPTIONAL(assigned_value)); // If this is copying a big value, then first store it in a variable, this is to // ensure value semantics even in special cases. if (llvm_value_is_addr(be_value) && type_size(assigned_value->type) > 16) @@ -3235,15 +3235,15 @@ static void llvm_emit_slice_assign(GenContext *c, BEValue *be_value, Expr *expr) if (llvm_is_const(start.value) && llvm_is_const(end.value)) { - assert(type_is_integer(start.type) && type_is_integer(end.type)); + ASSERT0(type_is_integer(start.type) && type_is_integer(end.type)); bool signed_start = type_is_signed(start.type); bool signed_end = type_is_signed(end.type); uint64_t start_val = signed_start ? (uint64_t)LLVMConstIntGetSExtValue(start.value) : (uint64_t)LLVMConstIntGetZExtValue(start.value); uint64_t end_val = signed_end ? 
(uint64_t)LLVMConstIntGetSExtValue(end.value) : (uint64_t)LLVMConstIntGetZExtValue(end.value); - assert(start_val <= INT64_MAX); - assert(end_val <= INT64_MAX); + ASSERT0(start_val <= INT64_MAX); + ASSERT0(end_val <= INT64_MAX); if (start_val > end_val) return; if (is_exclusive) { @@ -3404,7 +3404,7 @@ void llvm_emit_int_comp_raw(GenContext *c, BEValue *result, Type *lhs_type, Type } else { - assert(type_is_integer_or_bool_kind(lhs_type)); + ASSERT0(type_is_integer_or_bool_kind(lhs_type)); lhs_signed = type_is_signed(lhs_type); rhs_signed = type_is_signed(rhs_type); } @@ -3467,7 +3467,7 @@ void llvm_emit_int_comp_raw(GenContext *c, BEValue *result, Type *lhs_type, Type if (!lhs_signed) { - assert(lhs_signed == rhs_signed); + ASSERT0(lhs_signed == rhs_signed); // Right and left side are both unsigned. LLVMValueRef value; switch (binary_op) @@ -4059,8 +4059,8 @@ void llvm_emit_lhs_is_subtype(GenContext *c, BEValue *result, BEValue *lhs, BEVa void llvm_emit_comp(GenContext *c, BEValue *result, BEValue *lhs, BEValue *rhs, BinaryOp binary_op) { - assert(type_lowering(lhs->type) == lhs->type); - assert(binary_op >= BINARYOP_GT && binary_op <= BINARYOP_EQ); + ASSERT0(type_lowering(lhs->type) == lhs->type); + ASSERT0(binary_op >= BINARYOP_GT && binary_op <= BINARYOP_EQ); switch (lhs->type->type_kind) { case TYPE_VOID: @@ -4269,7 +4269,7 @@ INLINE bool llvm_emit_fmuladd_maybe(GenContext *c, BEValue *be_value, Expr *expr break; case FMUL_RHS_NEG_MULT: rhs = rhs->unary_expr.expr; - assert(!negate_rhs); + ASSERT0(!negate_rhs); args[0] = llvm_emit_exprid_to_rvalue(c, rhs->binary_expr.left); args[1] = llvm_emit_exprid_to_rvalue(c, rhs->binary_expr.right); @@ -4446,7 +4446,7 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs case BINARYOP_ADD: if (lhs_type->type_kind == TYPE_POINTER) { - assert(type_is_integer(rhs_type)); + ASSERT0(type_is_integer(rhs_type)); val = llvm_emit_pointer_gep_raw(c, llvm_get_pointee_type(c, lhs_type), lhs_value, rhs_value); break; } @@ -4543,7 +4543,7 @@ void llvm_emit_binary(GenContext *c, BEValue *be_value, Expr *expr, BEValue *lhs // Handled elsewhere. UNREACHABLE } - assert(val); + ASSERT0(val); llvm_value_set(be_value, val, expr->type); } @@ -4565,8 +4565,8 @@ void llvm_emit_typeid(GenContext *c, BEValue *be_value, Type *type) void llvm_emit_try_assign_try_catch(GenContext *c, bool is_try, BEValue *be_value, BEValue *var_addr, BEValue *catch_addr, Expr *rhs) { - assert(!catch_addr || llvm_value_is_addr(catch_addr)); - assert(!var_addr || llvm_value_is_addr(var_addr)); + ASSERT0(!catch_addr || llvm_value_is_addr(catch_addr)); + ASSERT0(!var_addr || llvm_value_is_addr(var_addr)); // 1. Create after try/catch block LLVMBasicBlockRef catch_block = llvm_basic_block_new(c, "catch_landing"); @@ -4587,7 +4587,7 @@ void llvm_emit_try_assign_try_catch(GenContext *c, bool is_try, BEValue *be_valu // 7. If we have a variable, then we make the store. 
if (var_addr) { - assert(is_try && "Storing will only happen on try."); + ASSERT0(is_try && "Storing will only happen on try."); llvm_store(c, var_addr, be_value); } @@ -4732,7 +4732,7 @@ static void llvm_emit_vector_assign_expr(GenContext *c, BEValue *be_value, Expr if (binary_op > BINARYOP_ASSIGN) { BinaryOp base_op = binaryop_assign_base_op(binary_op); - assert(base_op != BINARYOP_ERROR); + ASSERT0(base_op != BINARYOP_ERROR); BEValue lhs; llvm_value_set(&lhs, LLVMBuildExtractElement(c->builder, vector_value, index_val, "elem"), expr->type); llvm_emit_binary(c, be_value, expr, &lhs, base_op); @@ -4761,12 +4761,12 @@ static void llvm_emit_binary_expr(GenContext *c, BEValue *be_value, Expr *expr) { // Finde the base op. BinaryOp base_op = binaryop_assign_base_op(binary_op); - assert(base_op != BINARYOP_ERROR); + ASSERT0(base_op != BINARYOP_ERROR); // Get the left hand side, which must be an address. BEValue addr; llvm_emit_expr(c, &addr, exprptr(expr->binary_expr.left)); - assert(llvm_value_is_addr(&addr)); + ASSERT0(llvm_value_is_addr(&addr)); // Fold the optional. llvm_value_fold_optional(c, &addr); @@ -4783,7 +4783,7 @@ static void llvm_emit_binary_expr(GenContext *c, BEValue *be_value, Expr *expr) { Expr *left = exprptr(expr->binary_expr.left); llvm_emit_expr(c, be_value, left); - assert(llvm_value_is_addr(be_value)); + ASSERT0(llvm_value_is_addr(be_value)); LLVMValueRef optional_ref = NULL; // If the LHS is an identifier, then we're assigning the optional value to that. @@ -4884,7 +4884,7 @@ void gencontext_emit_ternary_expr(GenContext *c, BEValue *value, Expr *expr) LLVMValueRef lhs_value = is_elvis ? value->value : NULL; if (value->kind != BE_BOOLEAN) { - assert(is_elvis); + ASSERT0(is_elvis); CastKind cast = cast_to_bool_kind(value->type); llvm_emit_cast(c, cast, cond, value, type_bool, value->type); } @@ -4979,11 +4979,12 @@ void gencontext_emit_ternary_expr(GenContext *c, BEValue *value, Expr *expr) return; } - if (expr->type == type_void) + if (type_lowering(expr->type) == type_void) { llvm_value_set(value, NULL, expr->type); return; } + llvm_new_phi(c, value, "val", expr->type, lhs_value, lhs_exit, rhs_value, rhs_exit); } static LLVMValueRef llvm_emit_real(LLVMTypeRef type, Float f) @@ -5005,7 +5006,7 @@ static inline void llvm_emit_const_initializer_list_expr(GenContext *c, BEValue { if (llvm_is_global_eval(c) || type_flat_is_vector(expr->type) || type_flatten(expr->type)->type_kind == TYPE_BITSTRUCT) { - assert(type_flatten(expr->type)->type_kind != TYPE_SLICE); + ASSERT0(type_flatten(expr->type)->type_kind != TYPE_SLICE); llvm_value_set(value, llvm_emit_const_initializer(c, expr->const_expr.initializer), expr->type); return; } @@ -5069,14 +5070,14 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr) LLVMValueRef global_copy = llvm_add_global_raw(c, ".__const_slice", val_type, alignment); LLVMSetInitializer(global_copy, value); llvm_set_private_declaration(global_copy); - assert(type_is_arraylike(init->type)); + ASSERT0(type_is_arraylike(init->type)); LLVMValueRef val = llvm_emit_aggregate_two(c, type, global_copy, llvm_const_int(c, type_usz, init->type->array.len)); llvm_value_set(be_value, val, type); } else { - assert(type_is_arraylike(init->type)); + ASSERT0(type_is_arraylike(init->type)); llvm_value_set_address_abi_aligned(be_value, llvm_emit_alloca_aligned(c, init->type, "literal"), init->type); llvm_emit_const_init_ref(c, be_value, init, true); LLVMValueRef val = llvm_emit_aggregate_two(c, type, be_value->value, llvm_const_int(c, type_usz, 
init->type->array.len)); @@ -5189,7 +5190,7 @@ static void llvm_emit_const_expr(GenContext *c, BEValue *be_value, Expr *expr) case CONST_ERR: { Decl *decl = expr->const_expr.enum_err_val; - assert(decl); + ASSERT0(decl); LLVMValueRef value = LLVMBuildPtrToInt(c->builder, llvm_get_ref(c, decl), llvm_get_type(c, type_anyfault), ""); llvm_value_set(be_value, value, type_anyfault); return; @@ -5272,9 +5273,9 @@ static void llvm_expand_type_to_args(GenContext *context, Type *param_type, LLVM void llvm_emit_struct_member_ref(GenContext *c, BEValue *struct_ref, BEValue *member_ref, unsigned member_id) { - assert(llvm_value_is_addr(struct_ref)); + ASSERT0(llvm_value_is_addr(struct_ref)); llvm_value_fold_optional(c, struct_ref); - assert(struct_ref->type->type_kind == TYPE_STRUCT); + ASSERT0(struct_ref->type->type_kind == TYPE_STRUCT); AlignSize align; LLVMValueRef ptr = llvm_emit_struct_gep_raw(c, struct_ref->value, llvm_get_type(c, struct_ref->type), member_id, struct_ref->alignment, &align); llvm_value_set_address(member_ref, ptr, struct_ref->type->decl->strukt.members[member_id]->type, align); @@ -5295,7 +5296,7 @@ LLVMValueRef llvm_emit_array_gep_raw_index(GenContext *c, LLVMValueRef ptr, LLVM LLVMValueRef index_val = llvm_load_value(c, index); LLVMTypeRef element_type = LLVMGetElementType(array_type); Type *index_type = index->type; - assert(type_is_integer(index_type)); + ASSERT0(type_is_integer(index_type)); LLVMTypeRef idx_type = llvm_get_type(c, index_type); if (type_is_unsigned(index_type) && type_size(index_type) < type_size(type_usz)) { @@ -5400,7 +5401,7 @@ void llvm_emit_slice_len(GenContext *c, BEValue *slice, BEValue *len) void llvm_emit_slice_pointer(GenContext *context, BEValue *slice, BEValue *pointer) { - assert(slice->type->type_kind == TYPE_SLICE); + ASSERT0(slice->type->type_kind == TYPE_SLICE); Type *ptr_type = type_get_ptr(slice->type->array.base); if (slice->kind == BE_ADDRESS) { @@ -5456,7 +5457,7 @@ void llvm_value_struct_gep(GenContext *c, BEValue *element, BEValue *struct_poin void llvm_emit_parameter(GenContext *c, LLVMValueRef *args, unsigned *arg_count_ref, ABIArgInfo *info, BEValue *be_value, Type *type) { type = type_lowering(type); - assert(be_value->type->canonical == type); + ASSERT0(be_value->type->canonical == type); switch (info->kind) { case ABI_ARG_IGNORE: @@ -5465,7 +5466,7 @@ void llvm_emit_parameter(GenContext *c, LLVMValueRef *args, unsigned *arg_count_ case ABI_ARG_INDIRECT: { // If we want we could optimize for structs by doing it by reference here. 
- assert(info->indirect.alignment == type_abi_alignment(type) || info->attributes.realign); + ASSERT0(info->indirect.alignment == type_abi_alignment(type) || info->attributes.realign); if (info->attributes.by_val && llvm_value_is_addr(be_value) && info->indirect.alignment <= be_value->alignment) { llvm_value_fold_optional(c, be_value); @@ -5483,7 +5484,7 @@ void llvm_emit_parameter(GenContext *c, LLVMValueRef *args, unsigned *arg_count_ case ABI_ARG_DIRECT_SPLIT_STRUCT_I32: { LLVMTypeRef coerce_type = llvm_get_coerce_type(c, info); - assert(coerce_type && coerce_type != llvm_get_type(c, type)); + ASSERT0(coerce_type && coerce_type != llvm_get_type(c, type)); AlignSize target_alignment = llvm_abi_alignment(c, coerce_type); AlignSize alignment; @@ -5521,14 +5522,14 @@ void llvm_emit_parameter(GenContext *c, LLVMValueRef *args, unsigned *arg_count_ } case ABI_ARG_DIRECT_PAIR: { - assert(type_flatten(be_value->type) == be_value->type); + ASSERT0(type_flatten(be_value->type) == be_value->type); LLVMTypeRef original_type = llvm_get_type(c, be_value->type); LLVMTypeRef struct_type = llvm_get_coerce_type(c, info); AlignSize alignment; if (llvm_types_are_similar(original_type, struct_type)) { // Optimization - assert(LLVMGetTypeKind(original_type) == LLVMStructTypeKind && LLVMCountStructElementTypes(original_type) == 2); + ASSERT0(LLVMGetTypeKind(original_type) == LLVMStructTypeKind && LLVMCountStructElementTypes(original_type) == 2); if (llvm_value_is_addr(be_value)) { LLVMValueRef ptr = llvm_emit_struct_gep_raw(c, be_value->value, original_type, 0, be_value->alignment, &alignment); @@ -5662,7 +5663,7 @@ void llvm_emit_raw_call(GenContext *c, BEValue *result_value, FunctionPrototype break; } - assert(!prototype->ret_by_ref || prototype->ret_by_ref_abi_info->kind != ABI_ARG_INDIRECT); + ASSERT0(!prototype->ret_by_ref || prototype->ret_by_ref_abi_info->kind != ABI_ARG_INDIRECT); llvm_add_abi_call_attributes(c, call_value, vec_size(prototype->param_types), prototype->abi_args); if (prototype->abi_varargs) @@ -5682,7 +5683,7 @@ void llvm_emit_raw_call(GenContext *c, BEValue *result_value, FunctionPrototype case ABI_ARG_IGNORE: // 12. Basically void returns or empty structs. // Here we know we don't have an optional or any return value that can be used. - assert(!prototype->is_optional && "Optional should have produced a return value."); + ASSERT0(!prototype->is_optional && "Optional should have produced a return value."); *result_value = (BEValue) { .type = type_void, .kind = BE_VALUE }; return; case ABI_ARG_INDIRECT: @@ -5695,7 +5696,7 @@ void llvm_emit_raw_call(GenContext *c, BEValue *result_value, FunctionPrototype // 13b. If not it will be contained in a be_value that is an address, // so we don't need to do anything more. - assert(result_value->kind == BE_ADDRESS); + ASSERT0(result_value->kind == BE_ADDRESS); break; case ABI_ARG_DIRECT_PAIR: @@ -6012,12 +6013,12 @@ INLINE void llvm_emit_call_invocation(GenContext *c, BEValue *result_value, break; } // 6b. Return true is indirect, in this case we allocate a local, using the desired alignment on the caller side. - assert(ret_info->attributes.realign || ret_info->indirect.alignment == type_abi_alignment(call_return_type)); + ASSERT0(ret_info->attributes.realign || ret_info->indirect.alignment == type_abi_alignment(call_return_type)); AlignSize alignment = ret_info->indirect.alignment; // If we have a target, then use it. 
if (target && alignment <= target->alignment) { - assert(target->kind == BE_ADDRESS); + ASSERT0(target->kind == BE_ADDRESS); arg_values[arg_count++] = target->value; sret_return = true; break; @@ -6181,9 +6182,9 @@ static void llvm_emit_call_expr(GenContext *c, BEValue *result_value, Expr *expr // 1. Dynamic dispatch. if (expr->call_expr.is_dynamic_dispatch) { - assert(arg_count); + ASSERT0(arg_count); Expr *any_val = args[0]; - assert(any_val->expr_kind == EXPR_CAST); + ASSERT0(any_val->expr_kind == EXPR_CAST); args[0] = exprptr(any_val->cast_expr.expr); } @@ -6193,7 +6194,7 @@ static void llvm_emit_call_expr(GenContext *c, BEValue *result_value, Expr *expr Expr *function = exprptr(expr->call_expr.function); // 1a. Find the pointee type for the function pointer: - assert(type_flatten(function->type)->type_kind == TYPE_FUNC_PTR); + ASSERT0(type_flatten(function->type)->type_kind == TYPE_FUNC_PTR); Type *type = type_flatten(function->type)->pointer; // 1b. Find the type signature using the underlying pointer. @@ -6228,7 +6229,7 @@ static void llvm_emit_call_expr(GenContext *c, BEValue *result_value, Expr *expr // 2b. Set signature, function and function type prototype = type_get_resolved_prototype(function_decl->type); func = llvm_get_ref(c, function_decl); - assert(func); + ASSERT0(func); func_type = llvm_get_type(c, function_decl->type); } int inline_flag = 0; @@ -6290,7 +6291,7 @@ static void llvm_emit_call_expr(GenContext *c, BEValue *result_value, Expr *expr // 1. Dynamic dispatch. if (expr->call_expr.is_dynamic_dispatch) { - assert(arg_count); + ASSERT0(arg_count); BEValue result = values[0]; BEValue typeid = result; llvm_emit_type_from_any(c, &typeid); @@ -6598,7 +6599,7 @@ static inline void llvm_emit_optional(GenContext *c, BEValue *be_value, Expr *ex // If there is an error value, assign to it. 
if (c->catch.fault) { - assert(c->catch.fault); + ASSERT0(c->catch.fault); llvm_emit_expr(c, be_value, fail); llvm_store_to_ptr(c, c->catch.fault, be_value); } @@ -6654,7 +6655,7 @@ static inline void llvm_emit_vector_initializer_list(GenContext *c, BEValue *val FOREACH(Expr *, designator, elements) { - assert(vec_size(designator->designator_expr.path) == 1); + ASSERT0(vec_size(designator->designator_expr.path) == 1); DesignatorElement *element = designator->designator_expr.path[0]; llvm_emit_expr(c, &val, designator->designator_expr.value); llvm_value_rvalue(c, &val); @@ -6688,7 +6689,7 @@ static inline void llvm_emit_initializer_list_expr(GenContext *c, BEValue *value llvm_emit_vector_initializer_list(c, value, expr); return; } - assert(!IS_OPTIONAL(expr) || c->catch.block); + ASSERT0(!IS_OPTIONAL(expr) || c->catch.block); llvm_value_set_address_abi_aligned(value, llvm_emit_alloca_aligned(c, type, "literal"), type); llvm_emit_initialize_reference(c, value, expr); } @@ -6759,7 +6760,7 @@ static inline void llvm_emit_try_unwrap(GenContext *c, BEValue *value, Expr *exp llvm_emit_local_decl(c, expr->try_unwrap_expr.decl, &addr); llvm_value_set_decl_address(c, &addr, expr->try_unwrap_expr.decl); } - assert(llvm_value_is_addr(&addr)); + ASSERT0(llvm_value_is_addr(&addr)); llvm_emit_try_assign_try_catch(c, true, value, &addr, NULL, expr->try_unwrap_expr.optional); } @@ -6946,7 +6947,7 @@ void llvm_emit_try_unwrap_chain(GenContext *c, BEValue *value, Expr *expr) { Expr **exprs = expr->try_unwrap_chain_expr; unsigned elements = vec_size(exprs); - assert(elements > 0); + ASSERT0(elements > 0); LLVMBasicBlockRef next_block = NULL; LLVMBasicBlockRef end_block = llvm_basic_block_new(c, "end_chain"); @@ -6955,7 +6956,7 @@ void llvm_emit_try_unwrap_chain(GenContext *c, BEValue *value, Expr *expr) if (elements == 1) { llvm_emit_expr(c, value, exprs[0]); - assert(llvm_value_is_bool(value)); + ASSERT0(llvm_value_is_bool(value)); return; } else @@ -6972,7 +6973,7 @@ void llvm_emit_try_unwrap_chain(GenContext *c, BEValue *value, Expr *expr) BEValue res; llvm_emit_expr(c, &res, link); llvm_value_rvalue(c, &res); - assert(llvm_value_is_bool(&res)); + ASSERT0(llvm_value_is_bool(&res)); llvm_emit_cond_br(c, &res, next_block, fail_block); } llvm_emit_block(c, next_block); @@ -7035,7 +7036,7 @@ static inline void llvm_emit_builtin_access(GenContext *c, BEValue *be_value, Ex llvm_emit_any_pointer(c, be_value, be_value); return; } - assert(be_value->type->type_kind == TYPE_SLICE); + ASSERT0(be_value->type->type_kind == TYPE_SLICE); llvm_emit_slice_pointer(c, be_value, be_value); return; case ACCESS_FAULTORDINAL: @@ -7046,7 +7047,7 @@ static inline void llvm_emit_builtin_access(GenContext *c, BEValue *be_value, Ex llvm_value_set(be_value, llvm_get_zero(c, type_usz), type_usz); return; } - assert(type_flatten(inner->type)->type_kind == TYPE_FAULTTYPE); + ASSERT0(type_flatten(inner->type)->type_kind == TYPE_FAULTTYPE); llvm_value_rvalue(c, be_value); BEValue zero; LLVMBasicBlockRef exit_block = llvm_basic_block_new(c, "faultordinal_exit"); @@ -7068,7 +7069,7 @@ static inline void llvm_emit_builtin_access(GenContext *c, BEValue *be_value, Ex { Type *inner_type = type_no_optional(inner->type)->canonical; (void)inner_type; - assert(inner_type->type_kind == TYPE_FAULTTYPE || inner_type->type_kind == TYPE_ANYFAULT); + ASSERT0(inner_type->type_kind == TYPE_FAULTTYPE || inner_type->type_kind == TYPE_ANYFAULT); llvm_value_rvalue(c, be_value); LLVMValueRef val = llvm_emit_alloca_aligned(c, type_chars, "faultname_zero"); 
BEValue zero; @@ -7096,7 +7097,7 @@ static inline void llvm_emit_builtin_access(GenContext *c, BEValue *be_value, Ex case ACCESS_ENUMNAME: { Type *inner_type = type_no_optional(inner->type)->canonical; - assert(inner_type->canonical->type_kind == TYPE_ENUM); + ASSERT0(inner_type->canonical->type_kind == TYPE_ENUM); llvm_value_rvalue(c, be_value); LLVMTypeRef slice = llvm_get_type(c, type_chars); LLVMValueRef to_introspect = LLVMBuildIntToPtr(c->builder, llvm_get_typeid(c, inner_type), @@ -7145,7 +7146,7 @@ static LLVMValueRef llvm_get_benchmark_hook_global(GenContext *c, Expr *expr) INLINE void llvm_emit_last_fault(GenContext *c, BEValue *value) { - assert(c->defer_error_var); + ASSERT0(c->defer_error_var); llvm_value_set_address_abi_aligned(value, c->defer_error_var, type_anyfault); } @@ -7192,7 +7193,7 @@ static void llvm_emit_swizzle(GenContext *c, BEValue *value, Expr *expr) LLVMTypeRef result_type = llvm_get_type(c, expr->type); unsigned vec_len = LLVMGetVectorSize(result_type); LLVMValueRef mask_val[4]; - assert(vec_len <= 4); + ASSERT0(vec_len <= 4); const char *sw_ptr = expr->swizzle_expr.swizzle; for (unsigned i = 0; i < vec_len; i++) { @@ -7233,7 +7234,7 @@ void llvm_emit_expr_global_value(GenContext *c, BEValue *value, Expr *expr) { sema_cast_const(expr); llvm_emit_expr(c, value, expr); - assert(!llvm_value_is_addr(value)); + ASSERT0(!llvm_value_is_addr(value)); } void llvm_emit_expr(GenContext *c, BEValue *value, Expr *expr) { @@ -7361,7 +7362,7 @@ void llvm_emit_expr(GenContext *c, BEValue *value, Expr *expr) return; case EXPR_SUBSCRIPT_ADDR: llvm_emit_subscript_addr(c, value, expr); - assert(llvm_value_is_addr(value)); + ASSERT0(llvm_value_is_addr(value)); llvm_value_fold_optional(c, value); value->kind = BE_VALUE; value->type = type_get_ptr(value->type); diff --git a/src/compiler/llvm_codegen_function.c b/src/compiler/llvm_codegen_function.c index 158513e47..4294f0806 100644 --- a/src/compiler/llvm_codegen_function.c +++ b/src/compiler/llvm_codegen_function.c @@ -65,7 +65,7 @@ void llvm_emit_br(GenContext *c, LLVMBasicBlockRef next_block) void llvm_emit_block(GenContext *c, LLVMBasicBlockRef next_block) { - assert(c->current_block == NULL); + ASSERT0(c->current_block == NULL); LLVMAppendExistingBasicBlock(c->cur_func.ref, next_block); LLVMPositionBuilderAtEnd(c->builder, next_block); c->current_block = next_block; @@ -151,7 +151,7 @@ static inline void llvm_process_parameter_value(GenContext *c, Decl *decl, ABIAr // Realign to best alignment. if (pref_align > decl_alignment) decl_alignment = decl->alignment = pref_align; AlignSize hi_offset = aligned_offset(llvm_store_size(c, lo), hi_alignment); - assert(hi_offset + llvm_store_size(c, hi) <= type_size(decl->type)); + ASSERT0(hi_offset + llvm_store_size(c, hi) <= type_size(decl->type)); // Emit decl llvm_emit_and_set_decl_alloca(c, decl); @@ -258,7 +258,7 @@ static inline void llvm_process_parameter_value(GenContext *c, Decl *decl, ABIAr } static inline void llvm_emit_func_parameter(GenContext *context, Decl *decl, ABIArgInfo *abi_info, unsigned *index, unsigned real_index) { - assert(decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_PARAM); + ASSERT0(decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_PARAM); // Allocate room on stack, but do not copy. 
llvm_process_parameter_value(context, decl, abi_info, index); @@ -307,7 +307,7 @@ void llvm_emit_return_abi(GenContext *c, BEValue *return_value, BEValue *optiona { if (return_value && return_value->type != type_void) { - assert(return_value->value); + ASSERT0(return_value->value); llvm_store_to_ptr_aligned(c, c->return_out, return_value, type_alloca_alignment(return_value->type)); } return_out = c->optional_out; @@ -318,12 +318,12 @@ void llvm_emit_return_abi(GenContext *c, BEValue *return_value, BEValue *optiona } return_value = optional; } - assert(return_value || info->kind == ABI_ARG_IGNORE); + ASSERT0(return_value || info->kind == ABI_ARG_IGNORE); switch (info->kind) { case ABI_ARG_INDIRECT: - assert(return_value); + ASSERT0(return_value); llvm_store_to_ptr_aligned(c, return_out, return_value, info->indirect.alignment); llvm_emit_return_value(c, NULL); return; @@ -405,7 +405,7 @@ void llvm_emit_function_body(GenContext *c, Decl *decl) { DEBUG_LOG("Generating function %s.", decl->name); if (decl->func_decl.attr_dynamic) vec_add(c->dynamic_functions, decl); - assert(decl->backend_ref); + ASSERT0(decl->backend_ref); if (decl->func_decl.attr_init || (decl->func_decl.attr_finalizer && compiler.platform.object_format == OBJ_FORMAT_MACHO)) { llvm_append_xxlizer(c, decl->func_decl.priority, decl->func_decl.attr_init, decl->backend_ref); @@ -437,7 +437,7 @@ void llvm_emit_function_body(GenContext *c, Decl *decl) void llvm_emit_body(GenContext *c, LLVMValueRef function, FunctionPrototype *prototype, Signature *signature, Ast *body, Decl *decl) { - assert(prototype && function && body); + ASSERT0(prototype && function && body); // Signature is NULL if the function is naked. bool emit_debug = llvm_use_debug(c); @@ -487,7 +487,7 @@ void llvm_emit_body(GenContext *c, LLVMValueRef function, FunctionPrototype *pro } if (prototype->ret_by_ref_abi_info) { - assert(!c->return_out); + ASSERT0(!c->return_out); c->return_out = llvm_get_next_param(c, &arg); } @@ -659,7 +659,7 @@ void llvm_emit_dynamic_functions(GenContext *c, Decl **funcs) void llvm_emit_function_decl(GenContext *c, Decl *decl) { - assert(decl->decl_kind == DECL_FUNC); + ASSERT0(decl->decl_kind == DECL_FUNC); // Resolve function backend type for function. 
decl_append_links_to_global(decl); LLVMValueRef function = llvm_get_ref(c, decl); diff --git a/src/compiler/llvm_codegen_instr.c b/src/compiler/llvm_codegen_instr.c index ce98d2f8c..a94c245f6 100644 --- a/src/compiler/llvm_codegen_instr.c +++ b/src/compiler/llvm_codegen_instr.c @@ -6,22 +6,22 @@ void llvm_emit_cond_br_raw(GenContext *context, LLVMValueRef b, LLVMBasicBlockRef then_block, LLVMBasicBlockRef else_block) { - assert(context->current_block); + ASSERT0(context->current_block); LLVMBuildCondBr(context->builder, b, then_block, else_block); context->current_block = NULL; } void llvm_emit_cond_br(GenContext *context, BEValue *value, LLVMBasicBlockRef then_block, LLVMBasicBlockRef else_block) { - assert(context->current_block); - assert(value->kind == BE_BOOLEAN); + ASSERT0(context->current_block); + ASSERT0(value->kind == BE_BOOLEAN); LLVMBuildCondBr(context->builder, value->value, then_block, else_block); context->current_block = NULL; } LLVMValueRef llvm_emit_lshr_fixed(GenContext *c, LLVMValueRef data, int shift) { - assert(shift >= 0); + ASSERT0(shift >= 0); if (shift == 0) return data; LLVMTypeRef type = LLVMTypeOf(data); BitSize bit_width = llvm_bitsize(c, type); @@ -31,7 +31,7 @@ LLVMValueRef llvm_emit_lshr_fixed(GenContext *c, LLVMValueRef data, int shift) LLVMValueRef llvm_emit_ashr_fixed(GenContext *c, LLVMValueRef data, int shift) { - assert(shift >= 0); + ASSERT0(shift >= 0); if (shift == 0) return data; LLVMTypeRef type = LLVMTypeOf(data); BitSize bit_width = llvm_bitsize(c, type); @@ -41,7 +41,7 @@ LLVMValueRef llvm_emit_ashr_fixed(GenContext *c, LLVMValueRef data, int shift) LLVMValueRef llvm_emit_shl_fixed(GenContext *c, LLVMValueRef data, int shift) { - assert(shift >= 0); + ASSERT0(shift >= 0); if (shift == 0) return data; LLVMTypeRef type = LLVMTypeOf(data); BitSize bit_width = llvm_bitsize(c, type); diff --git a/src/compiler/llvm_codegen_internal.h b/src/compiler/llvm_codegen_internal.h index 1b7492d8f..4766453cc 100644 --- a/src/compiler/llvm_codegen_internal.h +++ b/src/compiler/llvm_codegen_internal.h @@ -310,7 +310,7 @@ LLVMBuilderRef llvm_create_builder(GenContext *c); static inline LLVMValueRef decl_optional_ref(Decl *decl) { - assert(decl->decl_kind == DECL_VAR); + ASSERT0(decl->decl_kind == DECL_VAR); if (decl->var.kind == VARDECL_UNWRAPPED) return decl_optional_ref(decl->var.alias); if (decl->type->type_kind != TYPE_OPTIONAL) return NULL; return decl->var.optional_ref; diff --git a/src/compiler/llvm_codegen_internal_impl.h b/src/compiler/llvm_codegen_internal_impl.h index 4bcdb004b..3f944a2fc 100644 --- a/src/compiler/llvm_codegen_internal_impl.h +++ b/src/compiler/llvm_codegen_internal_impl.h @@ -19,13 +19,13 @@ INLINE LLVMValueRef llvm_zext_trunc(GenContext *c, LLVMValueRef data, LLVMTypeRe { LLVMTypeRef current_type = LLVMTypeOf(data); if (current_type == type) return data; - assert(llvm_is_int_or_vector_int(type)); - assert(llvm_is_int_or_vector_int(current_type)); + ASSERT0(llvm_is_int_or_vector_int(type)); + ASSERT0(llvm_is_int_or_vector_int(current_type)); if (llvm_bitsize(c, current_type) < llvm_bitsize(c, type)) { return LLVMBuildZExt(c->builder, data, type, "zext"); } - assert(llvm_bitsize(c, current_type) > llvm_bitsize(c, type)); + ASSERT0(llvm_bitsize(c, current_type) > llvm_bitsize(c, type)); return LLVMBuildTrunc(c->builder, data, type, "trunc"); } @@ -33,13 +33,13 @@ INLINE LLVMValueRef llvm_sext_trunc(GenContext *c, LLVMValueRef data, LLVMTypeRe { LLVMTypeRef current_type = LLVMTypeOf(data); if (current_type == type) return data; - 
assert(llvm_is_int_or_vector_int(type)); - assert(llvm_is_int_or_vector_int(current_type)); + ASSERT0(llvm_is_int_or_vector_int(type)); + ASSERT0(llvm_is_int_or_vector_int(current_type)); if (llvm_bitsize(c, current_type) < llvm_bitsize(c, type)) { return LLVMBuildSExt(c->builder, data, type, "sext"); } - assert(llvm_bitsize(c, current_type) > llvm_bitsize(c, type)); + ASSERT0(llvm_bitsize(c, current_type) > llvm_bitsize(c, type)); return LLVMBuildTrunc(c->builder, data, type, "trunc"); } @@ -59,7 +59,7 @@ INLINE void llvm_value_ext_trunc(GenContext *c, BEValue *value, Type *type) ByteSize size = type_size(from_type); ByteSize to_size = type_size(type); - assert(type_is_intlike(type) && type_is_intlike(from_type)); + ASSERT0(type_is_intlike(type) && type_is_intlike(from_type)); if (size == to_size) return; llvm_value_rvalue(c, value); @@ -84,20 +84,20 @@ INLINE LLVMValueRef llvm_store_decl(GenContext *c, Decl *decl, BEValue *value) { BEValue ref; llvm_value_set_decl(c, &ref, decl); - assert(llvm_value_is_addr(&ref)); + ASSERT0(llvm_value_is_addr(&ref)); return llvm_store(c, &ref, value); } INLINE LLVMValueRef llvm_store_raw(GenContext *c, BEValue *destination, LLVMValueRef raw_value) { - assert(llvm_value_is_addr(destination)); + ASSERT0(llvm_value_is_addr(destination)); return llvm_store_to_ptr_raw_aligned(c, destination->value, raw_value, destination->alignment); } INLINE LLVMValueRef llvm_store_decl_raw(GenContext *context, Decl *decl, LLVMValueRef value) { - assert(!decl->is_value); + ASSERT0(!decl->is_value); return llvm_store_to_ptr_raw_aligned(context, decl->backend_ref, value, decl->alignment); } @@ -122,7 +122,7 @@ INLINE LLVMValueRef llvm_store_to_ptr_raw(GenContext *c, LLVMValueRef pointer, L INLINE void llvm_value_bitcast(GenContext *c UNUSED, BEValue *value, Type *type) { - assert(llvm_value_is_addr(value)); + ASSERT0(llvm_value_is_addr(value)); type = type_lowering(type); value->type = type; } @@ -279,25 +279,25 @@ INLINE bool llvm_is_const(LLVMValueRef value) INLINE LLVMValueRef llvm_get_zstring(GenContext *c, const char *str, size_t len) { - assert(len == (unsigned)len); + ASSERT0(len == (unsigned)len); return LLVMConstStringInContext(c->context, str, (unsigned)len, 0); } INLINE LLVMValueRef llvm_get_bytes(GenContext *c, const char *str, size_t len) { - assert(len == (unsigned)len); + ASSERT0(len == (unsigned)len); return LLVMConstStringInContext(c->context, str, (unsigned)len, 1); } INLINE LLVMValueRef llvm_get_struct(GenContext *c, LLVMValueRef *vals, size_t len) { - assert(len == (unsigned)len); + ASSERT0(len == (unsigned)len); return LLVMConstStructInContext(c->context, vals, (unsigned)len, false); } INLINE LLVMValueRef llvm_get_packed_struct(GenContext *c, LLVMValueRef *vals, size_t len) { - assert(len == (unsigned)len); + ASSERT0(len == (unsigned)len); return LLVMConstStructInContext(c->context, vals, (unsigned)len, true); } @@ -324,7 +324,7 @@ INLINE LLVMValueRef llvm_get_struct_of_type(GenContext *c, Type *type, LLVMValue INLINE LLVMValueRef llvm_const_int(GenContext *c, Type *type, uint64_t val) { type = type_lowering(type); - assert(type_is_integer(type) || type->type_kind == TYPE_BOOL); + ASSERT0(type_is_integer(type) || type->type_kind == TYPE_BOOL); return LLVMConstInt(llvm_get_type(c, type), val, type_is_integer_signed(type)); } @@ -342,14 +342,14 @@ INLINE LLVMValueRef llvm_add_global_raw(GenContext *c, const char *name, LLVMTyp INLINE void llvm_emit_exprid(GenContext *c, BEValue *value, ExprId expr) { - assert(expr); + ASSERT0(expr); llvm_emit_expr(c, value, 
exprptr(expr)); } INLINE void llvm_set_alignment(LLVMValueRef alloca, AlignSize alignment) { - assert(alignment > 0); + ASSERT0(alignment > 0); LLVMSetAlignment(alloca, (unsigned)alignment); } diff --git a/src/compiler/llvm_codegen_module.c b/src/compiler/llvm_codegen_module.c index 23c4150e9..6959e3fa9 100644 --- a/src/compiler/llvm_codegen_module.c +++ b/src/compiler/llvm_codegen_module.c @@ -57,7 +57,7 @@ static void llvm_set_module_flag(GenContext *c, LLVMModuleFlagBehavior flag_beha void gencontext_begin_module(GenContext *c) { - assert(!c->module && "Expected no module"); + ASSERT0(!c->module && "Expected no module"); codegen_setup_object_names(c->code_module, &c->ir_filename, &c->asm_filename, &c->object_filename); diff --git a/src/compiler/llvm_codegen_stmt.c b/src/compiler/llvm_codegen_stmt.c index 03ef89d55..20efd6407 100644 --- a/src/compiler/llvm_codegen_stmt.c +++ b/src/compiler/llvm_codegen_stmt.c @@ -9,13 +9,13 @@ static void llvm_emit_switch_body(GenContext *c, BEValue *switch_value, Ast *swi // Emit a regular compound statement. void llvm_emit_compound_stmt(GenContext *c, Ast *ast) { - assert(ast->ast_kind == AST_COMPOUND_STMT); + ASSERT0(ast->ast_kind == AST_COMPOUND_STMT); DebugScope *old_block = NULL; if (ast->compound_stmt.parent_defer && llvm_use_debug(c)) { old_block = c->debug.block_stack; - assert(ast->compound_stmt.parent_defer); + ASSERT0(ast->compound_stmt.parent_defer); c->debug.block_stack = astptr(ast->compound_stmt.parent_defer)->defer_stmt.scope; } // Push the lexical scope if in debug. @@ -112,7 +112,7 @@ void llvm_emit_local_decl(GenContext *c, Decl *decl, BEValue *value) LLVMTypeRef alloc_type = llvm_get_type(c, var_type); // Create a local alloca - assert(!decl->backend_ref); + ASSERT0(!decl->backend_ref); llvm_emit_local_var_alloca(c, decl); // Create optional storage @@ -174,7 +174,7 @@ void llvm_emit_local_decl(GenContext *c, Decl *decl, BEValue *value) */ static void llvm_emit_cond(GenContext *c, BEValue *be_value, Expr *expr, bool bool_cast) { - assert(expr->expr_kind == EXPR_COND); + ASSERT0(expr->expr_kind == EXPR_COND); ByteSize size = vec_size(expr->cond_expr); // First emit everything up to the last element. @@ -313,7 +313,7 @@ static inline void llvm_emit_block_exit_return(GenContext *c, Ast *ast) { if (ast->return_stmt.cleanup_fail && IS_OPTIONAL(ret_expr)) { - assert(c->catch.block); + ASSERT0(c->catch.block); err_cleanup_block = llvm_basic_block_new(c, "opt_block_cleanup"); c->catch.block = err_cleanup_block; } @@ -412,7 +412,7 @@ static void llvm_emit_if_stmt(GenContext *c, Ast *ast) llvm_value_rvalue(c, &be_value); - assert(llvm_value_is_bool(&be_value)); + ASSERT0(llvm_value_is_bool(&be_value)); if (llvm_value_is_const(&be_value) && then_block != else_block) { @@ -493,7 +493,7 @@ static inline LoopType loop_type_for_cond(Expr *cond, bool do_while) // Do we have a constant cond? if (expr_is_const(cond)) { - assert(cond->const_expr.const_kind == CONST_BOOL); + ASSERT0(cond->const_expr.const_kind == CONST_BOOL); // The result is either infinite or no loop return cond->const_expr.b ? 
LOOP_INFINITE : LOOP_NONE; } @@ -550,7 +550,7 @@ void llvm_emit_for_stmt(GenContext *c, Ast *ast) return; } - assert(loop_start_block != NULL); + ASSERT0(loop_start_block != NULL); LLVMBasicBlockRef exit_block = llvm_basic_block_new(c, "loop.exit"); @@ -586,7 +586,7 @@ void llvm_emit_for_stmt(GenContext *c, Ast *ast) // Emit the block llvm_emit_block(c, cond_block); BEValue be_value; - assert(cond); + ASSERT0(cond); if (cond->expr_kind == EXPR_COND) { llvm_emit_cond(c, &be_value, cond, true); @@ -596,7 +596,7 @@ void llvm_emit_for_stmt(GenContext *c, Ast *ast) llvm_emit_expr(c, &be_value, cond); } llvm_value_rvalue(c, &be_value); - assert(llvm_value_is_bool(&be_value)); + ASSERT0(llvm_value_is_bool(&be_value)); // If we have a body, conditionally jump to it. LLVMBasicBlockRef cond_success = body_block ? body_block : inc_block; @@ -711,7 +711,7 @@ static void llvm_emit_switch_body_if_chain(GenContext *c, Expr *to_expr = exprptrzero(case_stmt->case_stmt.to_expr); if (to_expr) { - assert(!is_type_switch); + ASSERT0(!is_type_switch); BEValue to_value; llvm_emit_expr(c, &to_value, to_expr); llvm_value_rvalue(c, &to_value); @@ -769,7 +769,7 @@ static LLVMValueRef llvm_emit_switch_jump_stmt(GenContext *c, unsigned case_count = vec_size(cases); BEValue min_val; llvm_emit_expr(c, &min_val, exprptr(cases[min_index]->case_stmt.expr)); - assert(llvm_value_is_const(&min_val)); + ASSERT0(llvm_value_is_const(&min_val)); llvm_value_rvalue(c, switch_value); llvm_value_rvalue(c, &min_val); LLVMValueRef min = min_val.value; @@ -816,7 +816,7 @@ static void llvm_emit_switch_jump_table(GenContext *c, } Expr *from = exprptr(case_ast->case_stmt.expr); Expr *to = exprptrzero(case_ast->case_stmt.to_expr); - assert(type_is_integer(from->type) && expr_is_const(from)); + ASSERT0(type_is_integer(from->type) && expr_is_const(from)); Int value = from->const_expr.ixx; Int to_value = to ? to->const_expr.ixx : value; if (min.type == TYPE_VOID) @@ -838,9 +838,9 @@ static void llvm_emit_switch_jump_table(GenContext *c, switch_ast->switch_stmt.codegen.jump.default_index = default_index; switch_ast->switch_stmt.codegen.jump.min_index = min_index; max = int_sub(max, min); - assert(max.i.low <= 0xFFFF); + ASSERT0(max.i.low <= 0xFFFF); uint64_t count = switch_ast->switch_stmt.codegen.jump.count = max.i.low + 1; - assert(!max.i.high && "Should never exceed 64 bytes"); + ASSERT0(!max.i.high && "Should never exceed 64 bytes"); Type *goto_array_type = type_get_array(type_voidptr, count); LLVMTypeRef llvm_array_type = llvm_get_type(c, goto_array_type); @@ -857,7 +857,7 @@ static void llvm_emit_switch_jump_table(GenContext *c, static LLVMValueRef refs[DEFAULT_SWITCHRANGE_MAX_SIZE]; LLVMValueRef default_block_address = LLVMBlockAddress(c->cur_func.ref, default_block); - assert(count < DEFAULT_SWITCHRANGE_MAX_SIZE); + ASSERT0(count < DEFAULT_SWITCHRANGE_MAX_SIZE); memset(refs, 0, sizeof(LLVMValueRef) * count); for (unsigned i = 0; i < case_count; i++) { @@ -867,7 +867,7 @@ static void llvm_emit_switch_jump_table(GenContext *c, { Expr *from = exprptr(case_stmt->case_stmt.expr); Expr *to = exprptrzero(case_stmt->case_stmt.to_expr); - assert(type_is_integer(from->type) && expr_is_const(from)); + ASSERT0(type_is_integer(from->type) && expr_is_const(from)); Int value = int_sub(from->const_expr.ixx, min); Int to_value = to ? 
int_sub(to->const_expr.ixx, min) : value; uint64_t from_val = value.i.low; @@ -977,7 +977,7 @@ static void llvm_emit_switch_body(GenContext *c, BEValue *switch_value, Ast *swi llvm_emit_switch_jump_table(c, switch_ast, cases, default_case, &switch_current_val, exit_block); return; } - assert(!is_typeid); + ASSERT0(!is_typeid); LLVMValueRef switch_stmt = LLVMBuildSwitch(c->builder, switch_current_val.value, default_case ? default_case->case_stmt.backend_block : exit_block, case_count); c->current_block = NULL; @@ -990,7 +990,7 @@ static void llvm_emit_switch_body(GenContext *c, BEValue *switch_value, Ast *swi LLVMValueRef case_value; BEValue be_value; Expr *from = exprptr(case_stmt->case_stmt.expr); - assert(expr_is_const(from)); + ASSERT0(expr_is_const(from)); llvm_emit_expr(c, &be_value, from); llvm_value_rvalue(c, &be_value); case_value = be_value.value; @@ -1002,7 +1002,7 @@ static void llvm_emit_switch_body(GenContext *c, BEValue *switch_value, Ast *swi llvm_emit_expr(c, &to_value, to_expr); llvm_value_rvalue(c, &to_value); LLVMValueRef to = to_value.value; - assert(LLVMIsAConstant(to)); + ASSERT0(LLVMIsAConstant(to)); LLVMValueRef one = llvm_const_int(c, to_value.type, 1); while (LLVMConstIntGetZExtValue(LLVMBuildICmp(c->builder, LLVMIntEQ, to, case_value, "")) != 1) { @@ -1070,7 +1070,7 @@ void llvm_emit_break(GenContext *c, Ast *ast) void llvm_emit_continue(GenContext *c, Ast *ast) { - assert(ast->contbreak_stmt.is_resolved); + ASSERT0(ast->contbreak_stmt.is_resolved); llvm_emit_statement_chain(c, ast->contbreak_stmt.defers); Ast *jump_target = astptr(ast->contbreak_stmt.ast); LLVMBasicBlockRef jump; @@ -1166,7 +1166,7 @@ static inline void llvm_emit_assume(GenContext *c, Expr *expr) BEValue value; llvm_emit_expr(c, &value, expr); llvm_value_rvalue(c, &value); - assert(value.kind == BE_BOOLEAN); + ASSERT0(value.kind == BE_BOOLEAN); EMIT_LOC(c, expr); llvm_emit_assume_true(c, &value); } @@ -1183,7 +1183,7 @@ static inline void llvm_emit_assert_stmt(GenContext *c, Ast *ast) llvm_value_rvalue(c, &value); LLVMBasicBlockRef on_fail = llvm_basic_block_new(c, "assert_fail"); LLVMBasicBlockRef on_ok = llvm_basic_block_new(c, "assert_ok"); - assert(value.kind == BE_BOOLEAN); + ASSERT0(value.kind == BE_BOOLEAN); llvm_emit_cond_br(c, &value, on_ok, on_fail); llvm_emit_block(c, on_fail); SourceSpan loc = assert_expr->span; @@ -1304,7 +1304,7 @@ static inline void llvm_emit_asm_block_stmt(GenContext *c, Ast *ast) args[param_count++] = value.value; continue; } - assert(var->kind == ASM_ARG_REGVAR); + ASSERT0(var->kind == ASM_ARG_REGVAR); if (var->ident.early_clobber) { codegen_append_constraints(&clobber_list, "=&r"); @@ -1330,7 +1330,7 @@ static inline void llvm_emit_asm_block_stmt(GenContext *c, Ast *ast) value.kind = BE_VALUE; pointer_type[param_count] = llvm_get_type(c, value.type); value.type = type_get_ptr(value.type); - assert(!val->ident.copy_output); + ASSERT0(!val->ident.copy_output); codegen_append_constraints(&clobber_list, "*m"); break; case ASM_ARG_REGVAR: @@ -1565,7 +1565,7 @@ void llvm_emit_panic_if_true(GenContext *c, BEValue *value, const char *panic_na { if (LLVMIsAConstantInt(value->value)) { - assert(!LLVMConstIntGetZExtValue(value->value) && "Unexpected bounds check failed."); + ASSERT0(!LLVMConstIntGetZExtValue(value->value) && "Unexpected bounds check failed."); return; } LLVMBasicBlockRef panic_block = llvm_basic_block_new(c, "panic"); diff --git a/src/compiler/llvm_codegen_storeload.c b/src/compiler/llvm_codegen_storeload.c index b2affd9dc..02ddb2139 100644 --- 
a/src/compiler/llvm_codegen_storeload.c +++ b/src/compiler/llvm_codegen_storeload.c @@ -6,7 +6,7 @@ LLVMValueRef llvm_store_to_ptr_raw_aligned(GenContext *context, LLVMValueRef pointer, LLVMValueRef value, AlignSize alignment) { - assert(alignment > 0); + ASSERT0(alignment > 0); LLVMValueRef ref = LLVMBuildStore(context->builder, value, pointer); llvm_set_alignment(ref, alignment); return ref; @@ -36,7 +36,7 @@ bool llvm_temp_as_address(GenContext *c, Type *type) LLVMValueRef llvm_store_to_ptr_aligned(GenContext *c, LLVMValueRef destination, BEValue *value, AlignSize alignment) { // If we have an address but not an aggregate, do a load. - assert(alignment); + ASSERT0(alignment); llvm_value_fold_optional(c, value); if (value->kind == BE_ADDRESS && !type_is_abi_aggregate(value->type)) { @@ -66,16 +66,16 @@ LLVMValueRef llvm_store_to_ptr_aligned(GenContext *c, LLVMValueRef destination, LLVMValueRef llvm_store(GenContext *c, BEValue *destination, BEValue *value) { if (value->type == type_void) return NULL; - assert(!type_is_void(value->type)); - assert(llvm_value_is_addr(destination)); + ASSERT0(!type_is_void(value->type)); + ASSERT0(llvm_value_is_addr(destination)); return llvm_store_to_ptr_aligned(c, destination->value, value, destination->alignment); } LLVMValueRef llvm_load(GenContext *c, LLVMTypeRef type, LLVMValueRef pointer, AlignSize alignment, const char *name) { - assert(alignment > 0); - assert(!llvm_is_global_eval(c)); - assert(LLVMGetTypeContext(type) == c->context); + ASSERT0(alignment > 0); + ASSERT0(!llvm_is_global_eval(c)); + ASSERT0(LLVMGetTypeContext(type) == c->context); LLVMValueRef value = LLVMBuildLoad2(c->builder, type, pointer, name); llvm_set_alignment(value, alignment ? alignment : llvm_abi_alignment(c, type)); return value; diff --git a/src/compiler/llvm_codegen_type.c b/src/compiler/llvm_codegen_type.c index 20a43105d..24ca77994 100644 --- a/src/compiler/llvm_codegen_type.c +++ b/src/compiler/llvm_codegen_type.c @@ -277,7 +277,7 @@ LLVMTypeRef llvm_func_type(GenContext *context, FunctionPrototype *prototype) LLVMTypeRef llvm_get_pointee_type(GenContext *c, Type *any_type) { any_type = any_type->canonical; - assert(any_type->type_kind == TYPE_POINTER); + ASSERT0(any_type->type_kind == TYPE_POINTER); if (any_type == type_voidptr) return llvm_get_type(c, type_char); return llvm_get_type(c, any_type->pointer); } @@ -300,7 +300,7 @@ LLVMTypeRef llvm_get_type(GenContext *c, Type *any_type) { if (any_type->backend_type) { - assert(LLVMGetTypeContext(any_type->backend_type) == c->context && "Should have been purged"); + ASSERT0(LLVMGetTypeContext(any_type->backend_type) == c->context && "Should have been purged"); return any_type->backend_type; } Type *type = type_lowering(any_type); @@ -337,7 +337,7 @@ LLVMTypeRef llvm_get_type(GenContext *c, Type *any_type) return any_type->backend_type = LLVMIntTypeInContext(c->context, 8U); case TYPE_POINTER: case TYPE_FUNC_PTR: - assert(c->ptr_type); + ASSERT0(c->ptr_type); return any_type->backend_type = c->ptr_type; case TYPE_ARRAY: case TYPE_FLEXIBLE_ARRAY: @@ -370,7 +370,7 @@ LLVMTypeRef llvm_get_coerce_type(GenContext *c, ABIArgInfo *arg_info) case ABI_ARG_DIRECT_SPLIT_STRUCT_I32: { LLVMTypeRef coerce_type = llvm_get_type(c, type_uint); - assert(arg_info->direct_struct_expand > 1U && arg_info->direct_struct_expand < 10); + ASSERT0(arg_info->direct_struct_expand > 1U && arg_info->direct_struct_expand < 10); LLVMTypeRef refs[10]; for (unsigned i = 0; i < arg_info->direct_struct_expand; i++) { @@ -411,7 +411,7 @@ LLVMTypeRef 
llvm_abi_type(GenContext *c, AbiType type) static inline LLVMValueRef llvm_generate_temp_introspection_global(GenContext *c, Type *type) { - assert(!type->backend_typeid); + ASSERT0(!type->backend_typeid); LLVMValueRef temp = LLVMAddGlobal(c->module, c->introspect_type, "tempid"); type->backend_typeid = LLVMBuildPtrToInt(c->builder, temp, c->typeid_type, ""); return temp; @@ -426,9 +426,9 @@ static inline LLVMValueRef llvm_generate_introspection_global(GenContext *c, LLV if (original_global) { - assert(type->backend_typeid); + ASSERT0(type->backend_typeid); } - assert(type == type->canonical); + ASSERT0(type == type->canonical); Type *parent_type = type_find_parent_type(type); LLVMValueRef parent_typeid; LLVMValueRef global_name = NULL; diff --git a/src/compiler/llvm_codegen_value.c b/src/compiler/llvm_codegen_value.c index 56b9d7de9..375b3830b 100644 --- a/src/compiler/llvm_codegen_value.c +++ b/src/compiler/llvm_codegen_value.c @@ -11,7 +11,7 @@ void llvm_value_deref(GenContext *c, BEValue *value) void llvm_value_set(BEValue *value, LLVMValueRef llvm_value, Type *type) { type = type_lowering(type); - assert(llvm_value || type == type_void); + ASSERT0(llvm_value || type == type_void); value->value = llvm_value; value->alignment = type_abi_alignment(type); value->kind = BE_VALUE; @@ -40,7 +40,7 @@ void llvm_value_set(BEValue *value, LLVMValueRef llvm_value, Type *type) void llvm_value_set_address(BEValue *value, LLVMValueRef llvm_value, Type *type, AlignSize alignment) { - assert(alignment > 0); + ASSERT0(alignment > 0); value->value = llvm_value; value->alignment = alignment; value->kind = BE_ADDRESS; @@ -111,7 +111,7 @@ void llvm_value_rvalue(GenContext *c, BEValue *value) void llvm_emit_jump_to_optional_exit(GenContext *c, LLVMValueRef opt_value) { - assert(c->catch.block && "unexpected emit"); + ASSERT0(c->catch.block && "unexpected emit"); bool is_constant_opt = llvm_is_const(opt_value); // Maybe we don't need to emit anything? @@ -159,7 +159,7 @@ void llvm_value_fold_optional(GenContext *c, BEValue *value) void llvm_value_set_decl_address(GenContext *c, BEValue *value, Decl *decl) { - assert(!decl->is_value); + ASSERT0(!decl->is_value); LLVMValueRef backend_ref = llvm_get_ref(c, decl); llvm_value_set_address(value, backend_ref, decl->type, decl->alignment); diff --git a/src/compiler/module.c b/src/compiler/module.c index afbef7992..b41181154 100644 --- a/src/compiler/module.c +++ b/src/compiler/module.c @@ -23,7 +23,7 @@ void scratch_buffer_append_module(Module *module, bool is_export) switch (c) { case ':': - assert(name[0] == ':'); + ASSERT0(name[0] == ':'); scratch_buffer_append_char(is_export ? '_' : '.'); name++; break; @@ -75,7 +75,7 @@ const char *module_create_object_file_name(Module *module) Path *path_create_from_string(const char *string, uint32_t len, SourceSpan span) { - assert(string); + ASSERT0(string); Path *path = CALLOCS(Path); path->span = span; TokenType type = TOKEN_IDENT; diff --git a/src/compiler/number.c b/src/compiler/number.c index 77be41c38..217b3cc87 100644 --- a/src/compiler/number.c +++ b/src/compiler/number.c @@ -56,14 +56,14 @@ void expr_contract_array(ExprConst *expr_const, ConstKind contract_type) *expr_const = (ExprConst) { .const_kind = contract_type }; return; } - assert(expr_const->const_kind == CONST_INITIALIZER || expr_const->const_kind == CONST_SLICE); + ASSERT0(expr_const->const_kind == CONST_INITIALIZER || expr_const->const_kind == CONST_SLICE); ConstInitializer *initializer = expr_const->const_kind == CONST_SLICE ? 
expr_const->slice_init : expr_const->initializer; Type *type = initializer->type; - assert(type_is_any_arraylike(type)); + ASSERT0(type_is_any_arraylike(type)); ArraySize len = type->array.len; - assert(len > 0); + ASSERT0(len > 0); char *arr = calloc_arena(len); switch (initializer->kind) { @@ -78,7 +78,7 @@ void expr_contract_array(ExprConst *expr_const, ConstKind contract_type) { FOREACH(ConstInitializer *, init, initializer->init_array.elements) { - assert(init->kind == CONST_INIT_ARRAY_VALUE); + ASSERT0(init->kind == CONST_INIT_ARRAY_VALUE); arr[init->init_array_value.index] = (char) int_to_i64(init->init_array_value.element->init_value->const_expr.ixx); } break; @@ -87,7 +87,7 @@ void expr_contract_array(ExprConst *expr_const, ConstKind contract_type) { FOREACH_IDX(i, ConstInitializer *, init, initializer->init_array_full) { - assert(init->kind == CONST_INIT_VALUE); + ASSERT0(init->kind == CONST_INIT_VALUE); arr[i] = (char)int_to_i64(init->init_value->const_expr.ixx); } break; @@ -133,10 +133,10 @@ bool expr_const_compare(const ExprConst *left, const ExprConst *right, BinaryOp case CONST_BOOL: return compare_bool(left->b, right->b, op); case CONST_INTEGER: - assert(right->const_kind != CONST_ENUM); + ASSERT0(right->const_kind != CONST_ENUM); return int_comp(left->ixx, right->ixx, op); case CONST_REF: - assert(right->const_kind == CONST_POINTER || right->const_kind == CONST_REF); + ASSERT0(right->const_kind == CONST_POINTER || right->const_kind == CONST_REF); if (right->const_kind == CONST_POINTER) return false; return decl_flatten(right->global_ref) == decl_flatten(left->global_ref); case CONST_FLOAT: @@ -168,7 +168,7 @@ bool expr_const_compare(const ExprConst *left, const ExprConst *right, BinaryOp { Decl *left_decl = left->enum_err_val; // The error case - assert(right->const_kind == left->const_kind); + ASSERT0(right->const_kind == left->const_kind); Decl *right_decl = right->enum_err_val; // Non-matching cannot be compared. 
if (right_decl->type != left_decl->type) return false; @@ -255,7 +255,7 @@ bool expr_const_float_fits_type(const ExprConst *expr_const, TypeKind kind) default: UNREACHABLE } - assert(expr_const->const_kind == CONST_FLOAT); + ASSERT0(expr_const->const_kind == CONST_FLOAT); return expr_const->fxx.f >= -lo_limit && expr_const->fxx.f <= hi_limit; } diff --git a/src/compiler/parse_expr.c b/src/compiler/parse_expr.c index a994c74a1..b78a4b126 100644 --- a/src/compiler/parse_expr.c +++ b/src/compiler/parse_expr.c @@ -109,7 +109,7 @@ bool parse_generic_parameters(ParseContext *c, Expr ***exprs_ref) */ static Expr *parse_rethrow_expr(ParseContext *c, Expr *left_side) { - assert(expr_ok(left_side)); + ASSERT0(expr_ok(left_side)); advance_and_verify(c, TOKEN_BANG); Expr *expr = expr_new_expr(EXPR_RETHROW, left_side); expr->rethrow_expr.inner = left_side; @@ -404,7 +404,7 @@ static bool parse_param_path(ParseContext *c, DesignatorElement ***path) static Expr *parse_lambda(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_LAMBDA); advance_and_verify(c, TOKEN_FN); Decl *func = decl_calloc(); @@ -525,16 +525,7 @@ bool parse_arg_list(ParseContext *c, Expr ***result, TokenType param_end, bool v ASSIGN_EXPR_OR_RET(expr, parse_vasplat(c), false); goto DONE; } - if (try_consume(c, TOKEN_ELLIPSIS)) - { - expr = expr_new(EXPR_SPLAT, start_span); - ASSIGN_EXPR_OR_RET(expr->inner_expr, parse_expr(c), false); - RANGE_EXTEND_PREV(expr); - } - else - { - ASSIGN_EXPR_OR_RET(expr, parse_expr(c), false); - } + ASSIGN_EXPR_OR_RET(expr, parse_expr(c), false); DONE: vec_add(*result, expr); if (!try_consume(c, TOKEN_COMMA)) @@ -659,10 +650,19 @@ Expr *parse_ct_expression_list(ParseContext *c, bool allow_decl) * @param left must be null. 
* @return Expr* */ -static Expr *parse_type_identifier(ParseContext *context, Expr *left) +static Expr *parse_type_identifier(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); - return parse_type_expression_with_path(context, NULL); + ASSERT0(!left && "Unexpected left hand side"); + return parse_type_expression_with_path(c, NULL); +} + +static Expr *parse_splat(ParseContext *c, Expr *left) +{ + ASSERT0(!left && "Unexpected left hand side"); + Expr *expr = expr_new(EXPR_SPLAT, c->span); + advance_and_verify(c, TOKEN_ELLIPSIS); + ASSIGN_EXPR_OR_RET(expr->inner_expr, parse_expr(c), poisoned_expr); + return expr; } /** @@ -670,7 +670,7 @@ static Expr *parse_type_identifier(ParseContext *context, Expr *left) */ static Expr *parse_type_expr(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_TYPEINFO); ASSIGN_TYPE_OR_RET(TypeInfo *type, parse_optional_type(c), poisoned_expr); if (tok_is(c, TOKEN_LBRACE)) @@ -691,7 +691,7 @@ static Expr *parse_type_expr(ParseContext *c, Expr *left) static Expr *parse_ct_stringify(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); SourceSpan start_span = c->span; const char *start = c->lexer.current; advance(c); @@ -722,7 +722,7 @@ static Expr *parse_ct_stringify(ParseContext *c, Expr *left) */ static Expr *parse_unary_expr(ParseContext *c, Expr *left) { - assert(!left && "Did not expect a left hand side!"); + ASSERT0(!left && "Did not expect a left hand side!"); Expr *unary = EXPR_NEW_TOKEN(EXPR_UNARY); unary->unary_expr.operator = unaryop_from_token(c->tok); @@ -741,7 +741,7 @@ static Expr *parse_unary_expr(ParseContext *c, Expr *left) */ static Expr *parse_post_unary(ParseContext *c, Expr *left) { - assert(expr_ok(left)); + ASSERT0(expr_ok(left)); Expr *unary = expr_new_expr(EXPR_POST_UNARY, left); unary->unary_expr.expr = left; unary->unary_expr.operator = unaryop_from_token(c->tok); @@ -755,7 +755,7 @@ static Expr *parse_post_unary(ParseContext *c, Expr *left) */ static Expr *parse_elvis_expr(ParseContext *c, Expr *left_side) { - assert(expr_ok(left_side)); + ASSERT0(expr_ok(left_side)); Expr *expr_ternary = expr_new_expr(EXPR_TERNARY, left_side); expr_ternary->ternary_expr.cond = exprid(left_side); @@ -773,7 +773,7 @@ static Expr *parse_elvis_expr(ParseContext *c, Expr *left_side) */ static Expr *parse_ternary_expr(ParseContext *c, Expr *left_side) { - assert(expr_ok(left_side)); + ASSERT0(expr_ok(left_side)); Expr *expr = expr_new_expr(EXPR_TERNARY, left_side); advance_and_verify(c, TOKEN_QUESTION); @@ -812,7 +812,7 @@ static Expr *parse_ternary_expr(ParseContext *c, Expr *left_side) */ static Expr *parse_grouping_expr(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr; advance_and_verify(c, TOKEN_LPAREN); ASSIGN_EXPR_OR_RET(expr, parse_expr(c), poisoned_expr); @@ -870,7 +870,7 @@ static Expr *parse_grouping_expr(ParseContext *c, Expr *left) */ Expr *parse_initializer_list(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *initializer_list = EXPR_NEW_TOKEN(EXPR_INITIALIZER_LIST); advance_and_verify(c, TOKEN_LBRACE); if (!try_consume(c, TOKEN_RBRACE)) @@ -913,7 +913,7 @@ Expr *parse_initializer_list(ParseContext *c, Expr *left) static Expr *parse_orelse(ParseContext *c, Expr 
*left_side) { - assert(left_side && expr_ok(left_side)); + ASSERT0(left_side && expr_ok(left_side)); advance_and_verify(c, TOKEN_QUESTQUEST); @@ -932,7 +932,7 @@ static Expr *parse_orelse(ParseContext *c, Expr *left_side) static Expr *parse_binary(ParseContext *c, Expr *left_side) { - assert(left_side && expr_ok(left_side)); + ASSERT0(left_side && expr_ok(left_side)); // Remember the operator. TokenType operator_type = c->tok; @@ -961,7 +961,7 @@ static Expr *parse_binary(ParseContext *c, Expr *left_side) static Expr *parse_call_expr(ParseContext *c, Expr *left) { - assert(left && expr_ok(left)); + ASSERT0(left && expr_ok(left)); Expr **params = NULL; advance_and_verify(c, TOKEN_LPAREN); @@ -1058,7 +1058,7 @@ static Expr *parse_call_expr(ParseContext *c, Expr *left) */ static Expr *parse_subscript_expr(ParseContext *c, Expr *left) { - assert(left && expr_ok(left)); + ASSERT0(left && expr_ok(left)); advance_and_verify(c, TOKEN_LBRACKET); Expr *subs_expr = expr_new_expr(EXPR_SUBSCRIPT, left); @@ -1089,7 +1089,7 @@ static Expr *parse_subscript_expr(ParseContext *c, Expr *left) */ static Expr *parse_generic_expr(ParseContext *c, Expr *left) { - assert(left && expr_ok(left)); + ASSERT0(left && expr_ok(left)); Expr *subs_expr = expr_new_expr(EXPR_GENERIC_IDENT, left); subs_expr->generic_ident_expr.parent = exprid(left); if (!parse_generic_parameters(c, &subs_expr->generic_ident_expr.parmeters)) return poisoned_expr; @@ -1102,7 +1102,7 @@ static Expr *parse_generic_expr(ParseContext *c, Expr *left) */ static Expr *parse_access_expr(ParseContext *c, Expr *left) { - assert(left && expr_ok(left)); + ASSERT0(left && expr_ok(left)); advance_and_verify(c, TOKEN_DOT); Expr *access_expr = expr_new_expr(EXPR_ACCESS, left); access_expr->access_expr.parent = left; @@ -1113,7 +1113,7 @@ static Expr *parse_access_expr(ParseContext *c, Expr *left) static Expr *parse_ct_ident(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); if (try_consume(c, TOKEN_CT_CONST_IDENT)) { PRINT_ERROR_LAST("Compile time identifiers may not be constants."); @@ -1128,7 +1128,7 @@ static Expr *parse_ct_ident(ParseContext *c, Expr *left) static Expr *parse_hash_ident(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_HASH_IDENT); expr->ct_ident_expr.identifier = symstr(c); advance_and_verify(c, TOKEN_HASH_IDENT); @@ -1141,7 +1141,7 @@ static Expr *parse_hash_ident(ParseContext *c, Expr *left) */ static Expr *parse_ct_eval(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_CT_EVAL); advance(c); CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr); @@ -1154,7 +1154,7 @@ static Expr *parse_ct_eval(ParseContext *c, Expr *left) static Expr *parse_ct_defined(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *defined = expr_new(EXPR_CT_DEFINED, c->span); advance(c); CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr); @@ -1169,7 +1169,7 @@ static Expr *parse_ct_defined(ParseContext *c, Expr *left) */ static Expr *parse_ct_sizeof(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *access = expr_new(EXPR_ACCESS, c->span); advance(c); CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr); @@ -1194,7 +1194,7 @@ static 
Expr *parse_ct_sizeof(ParseContext *c, Expr *left) */ static Expr *parse_ct_is_const(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *checks = expr_new(EXPR_CT_IS_CONST, c->span); advance_and_verify(c, TOKEN_CT_IS_CONST); CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr); @@ -1209,7 +1209,7 @@ static Expr *parse_ct_is_const(ParseContext *c, Expr *left) */ static Expr *parse_ct_embed(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *embed = expr_new(EXPR_EMBED, c->span); advance_and_verify(c, TOKEN_CT_EMBED); CONSUME_OR_RET(TOKEN_LPAREN, poisoned_expr); @@ -1226,7 +1226,7 @@ static Expr *parse_ct_embed(ParseContext *c, Expr *left) static Expr *parse_ct_concat_append(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(tok_is(c, TOKEN_CT_CONCATFN) ? EXPR_CT_CONCAT : EXPR_CT_APPEND); SEMA_DEPRECATED(expr, "'%s' is deprecated in favour of '+++'.", symstr(c)); advance(c); @@ -1244,7 +1244,7 @@ static Expr *parse_ct_concat_append(ParseContext *c, Expr *left) */ static Expr *parse_ct_call(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_CT_CALL); expr->ct_call_expr.token_type = c->tok; advance(c); @@ -1261,7 +1261,7 @@ static Expr *parse_ct_call(ParseContext *c, Expr *left) static Expr *parse_ct_and_or(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_CT_AND_OR); expr->ct_and_or_expr.is_and = tok_is(c, TOKEN_CT_ANDFN); SEMA_DEPRECATED(expr, "The use of '%s' is deprecated in favour of '%s'.", symstr(c), @@ -1275,7 +1275,7 @@ static Expr *parse_ct_and_or(ParseContext *c, Expr *left) static Expr *parse_ct_castable(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_CT_CASTABLE); expr->castable_expr.is_assign = c->tok == TOKEN_CT_ASSIGNABLE; advance(c); @@ -1293,10 +1293,10 @@ static Expr *parse_ct_castable(ParseContext *c, Expr *left) */ static Expr *parse_ct_arg(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_CT_ARG); TokenType type = expr->ct_arg_expr.type = c->tok; - assert(type != TOKEN_CT_VATYPE); + ASSERT0(type != TOKEN_CT_VATYPE); advance(c); if (type != TOKEN_CT_VACOUNT) { @@ -1322,7 +1322,7 @@ static Expr *parse_ct_arg(ParseContext *c, Expr *left) */ static Expr *parse_identifier(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); if (symstr(c) == kw_return) { Expr *expr = EXPR_NEW_TOKEN(EXPR_RETVAL); @@ -1339,7 +1339,7 @@ static Expr *parse_identifier(ParseContext *c, Expr *left) static Expr *parse_identifier_starting_expression(ParseContext *c, Expr *left) { - assert(!left && "Unexpected left hand side"); + ASSERT0(!left && "Unexpected left hand side"); bool had_error; Path *path; if (!parse_path_prefix(c, &path)) return poisoned_expr; @@ -1383,7 +1383,7 @@ static Expr *parse_force_unwrap_expr(ParseContext *c, Expr *left) */ static Expr *parse_builtin(ParseContext *c, Expr *left) { - assert(!left && "Had left hand 
side"); + ASSERT0(!left && "Had left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_BUILTIN); if (!token_is_some_ident(peek(c))) { @@ -1435,7 +1435,7 @@ int read_int_suffix(const char *string, int loc, int len, char c) Expr *parse_integer(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); Expr *expr_int = EXPR_NEW_TOKEN(EXPR_CONST); expr_int->resolve_status = RESOLVE_DONE; size_t len = c->data.lex_len; @@ -1668,7 +1668,7 @@ Expr *parse_integer(ParseContext *c, Expr *left) static void parse_hex(char *result_pointer, const char *data, const char *end) { char *data_current = result_pointer; - assert(data_current); + ASSERT0(data_current); while (data < end) { int val, val2; @@ -1700,7 +1700,7 @@ static char base64_to_sextet(char c) static void parse_base64(char *result_pointer, char *result_pointer_end, const char *data, const char *end) { char *data_current = result_pointer; - assert(data_current); + ASSERT0(data_current); while (data < end) { int val, val2, val3, val4; @@ -1718,7 +1718,7 @@ static void parse_base64(char *result_pointer, char *result_pointer_end, const c static Expr *parse_bytes_expr(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); ArraySize len = 0; char *data = NULL; while (c->tok == TOKEN_BYTES) @@ -1766,7 +1766,7 @@ static Expr *parse_bytes_expr(ParseContext *c, Expr *left) */ static Expr *parse_char_lit(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); Expr *expr_int = EXPR_NEW_TOKEN(EXPR_CONST); expr_int->const_expr.is_character = true; expr_int->resolve_status = RESOLVE_DONE; @@ -1808,7 +1808,7 @@ static Expr *parse_char_lit(ParseContext *c, Expr *left) */ static Expr *parse_double(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); char *err; Expr *number = EXPR_NEW_TOKEN(EXPR_CONST); const char *original = symstr(c); @@ -1853,7 +1853,7 @@ static Expr *parse_double(ParseContext *c, Expr *left) */ static Expr *parse_string_literal(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); Expr *expr_string = EXPR_NEW_TOKEN(EXPR_CONST); const char *str = symstr(c); @@ -1886,7 +1886,7 @@ static Expr *parse_string_literal(ParseContext *c, Expr *left) PRINT_ERROR_HERE("String exceeded max size."); return poisoned_expr; } - assert(str); + ASSERT0(str); expr_string->const_expr.bytes.ptr = str; expr_string->const_expr.bytes.len = (uint32_t)len; expr_string->type = type_string; @@ -1900,7 +1900,7 @@ static Expr *parse_string_literal(ParseContext *c, Expr *left) */ static Expr *parse_bool(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); Expr *number = EXPR_NEW_TOKEN(EXPR_CONST); number->const_expr = (ExprConst) { .b = tok_is(c, TOKEN_TRUE), .const_kind = CONST_BOOL }; number->type = type_bool; @@ -1914,7 +1914,7 @@ static Expr *parse_bool(ParseContext *c, Expr *left) */ static Expr *parse_null(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); + ASSERT0(!left && "Had left hand side"); Expr *number = EXPR_NEW_TOKEN(EXPR_CONST); number->const_expr.const_kind = CONST_POINTER; number->const_expr.ptr = 0; @@ -1980,7 +1980,7 @@ Expr *parse_type_expression_with_path(ParseContext *c, Path *path) */ static Expr* parse_expr_block(ParseContext *c, Expr *left) { - assert(!left && "Had left hand side"); 
+ ASSERT0(!left && "Had left hand side"); Expr *expr = EXPR_NEW_TOKEN(EXPR_EXPR_BLOCK); advance_and_verify(c, TOKEN_LBRAPIPE); AstId *next = &expr->expr_block.first_stmt; @@ -2084,7 +2084,7 @@ ParseRule rules[TOKEN_EOF + 1] = { [TOKEN_HASH_IDENT] = { parse_hash_ident, NULL, PREC_NONE }, [TOKEN_AT_IDENT] = { parse_identifier, NULL, PREC_NONE }, //[TOKEN_HASH_TYPE_IDENT] = { parse_type_identifier, NULL, PREC_NONE } - + [TOKEN_ELLIPSIS] = { parse_splat, NULL, PREC_NONE }, [TOKEN_FN] = { parse_lambda, NULL, PREC_NONE }, [TOKEN_CT_CONCATFN] = {parse_ct_concat_append, NULL, PREC_NONE }, [TOKEN_CT_APPEND] = { parse_ct_concat_append, NULL, PREC_NONE }, diff --git a/src/compiler/parse_global.c b/src/compiler/parse_global.c index 71c739bb7..1ce3fa4fa 100644 --- a/src/compiler/parse_global.c +++ b/src/compiler/parse_global.c @@ -93,7 +93,7 @@ INLINE bool parse_decl_initializer(ParseContext *c, Decl *decl) */ static inline Path *parse_module_path(ParseContext *c) { - assert(tok_is(c, TOKEN_IDENT)); + ASSERT0(tok_is(c, TOKEN_IDENT)); scratch_buffer_clear(); SourceSpan span = c->span; while (1) @@ -236,7 +236,7 @@ bool parse_module(ParseContext *c, AstId contracts) { Ast *current = astptr(contracts); contracts = current->next; - assert(current->ast_kind == AST_CONTRACT); + ASSERT0(current->ast_kind == AST_CONTRACT); switch (current->contract_stmt.kind) { case CONTRACT_UNKNOWN: @@ -495,7 +495,7 @@ static inline TypeInfo *parse_base_type(ParseContext *c) */ static inline TypeInfo *parse_generic_type(ParseContext *c, TypeInfo *type) { - assert(type_info_ok(type)); + ASSERT0(type_info_ok(type)); TypeInfo *generic_type = type_info_new(TYPE_INFO_GENERIC, type->span); if (!parse_generic_parameters(c, &generic_type->generic.params)) return poisoned_type_info; generic_type->generic.base = type; @@ -510,7 +510,7 @@ static inline TypeInfo *parse_generic_type(ParseContext *c, TypeInfo *type) */ static inline TypeInfo *parse_array_type_index(ParseContext *c, TypeInfo *type) { - assert(type_info_ok(type)); + ASSERT0(type_info_ok(type)); advance_and_verify(c, TOKEN_LBRACKET); if (try_consume(c, TOKEN_STAR)) @@ -568,7 +568,7 @@ DIRECT_SLICE:; */ static inline TypeInfo *parse_vector_type_index(ParseContext *c, TypeInfo *type) { - assert(type_info_ok(type)); + ASSERT0(type_info_ok(type)); advance_and_verify(c, TOKEN_LVEC); TypeInfo *vector = type_info_new(TYPE_INFO_VECTOR, type->span); @@ -625,7 +625,7 @@ TypeInfo *parse_type_with_base(ParseContext *c, TypeInfo *type_info) default: { TypeInfo *ptr_type = type_info_new(TYPE_INFO_POINTER, type_info->span); - assert(type_info); + ASSERT0(type_info); ptr_type->pointer = type_info; type_info = ptr_type; RANGE_EXTEND_PREV(type_info); @@ -634,7 +634,7 @@ TypeInfo *parse_type_with_base(ParseContext *c, TypeInfo *type_info) } if (type_info->resolve_status == RESOLVE_DONE) { - assert(type_info->type); + ASSERT0(type_info->type); type_info->type = type_get_ptr(type_info->type); } RANGE_EXTEND_PREV(type_info); @@ -734,7 +734,7 @@ TypeInfo *parse_optional_type(ParseContext *c) if (try_consume(c, TOKEN_BANG)) { if (!parse_rethrow_bracket(c, info->span)) return poisoned_type_info; - assert(!info->optional); + ASSERT0(!info->optional); info->optional = true; if (info->resolve_status == RESOLVE_DONE) { @@ -1525,7 +1525,7 @@ bool parse_struct_body(ParseContext *c, Decl *parent) { CONSUME_OR_RET(TOKEN_LBRACE, false); - assert(decl_is_struct_type(parent)); + ASSERT0(decl_is_struct_type(parent)); ArrayIndex index = 0; while (!tok_is(c, TOKEN_RBRACE)) { @@ -1621,7 +1621,7 @@ bool 
parse_struct_body(ParseContext *c, Decl *parent) { Decl *member = members[i]; if (is_cond) member->is_cond = true; - assert(!member->attributes); + ASSERT0(!member->attributes); member->attributes = copy_attributes_single(attributes); } } @@ -1662,7 +1662,7 @@ static inline Decl *parse_distinct_declaration(ParseContext *c) // 2. Now parse the type which we know is here. ASSIGN_TYPE_OR_RET(decl->distinct, parse_type(c), poisoned_decl); - assert(!tok_is(c, TOKEN_LGENPAR)); + ASSERT0(!tok_is(c, TOKEN_LGENPAR)); RANGE_EXTEND_PREV(decl); CONSUME_EOS_OR_RET(poisoned_decl); @@ -1953,7 +1953,7 @@ static inline Decl *parse_def_type(ParseContext *c) PRINT_ERROR_HERE("Expected a type to alias here."); return poisoned_decl; } - assert(!tok_is(c, TOKEN_LGENPAR)); + ASSERT0(!tok_is(c, TOKEN_LGENPAR)); decl->typedef_decl.type_info = type_info; decl->typedef_decl.is_func = false; @@ -2250,7 +2250,7 @@ static inline bool parse_enum_param_list(ParseContext *c, Decl*** parameters_ref { if (!parse_enum_param_decl(c, parameters_ref)) return false; Decl *last_parameter = VECLAST(*parameters_ref); - assert(last_parameter); + ASSERT0(last_parameter); last_parameter->var.index = vec_size(*parameters_ref) - 1; // NOLINT if (!try_consume(c, TOKEN_COMMA)) { @@ -2939,7 +2939,7 @@ Decl *parse_top_level_statement(ParseContext *c, ParseContext **c_ref) return poisoned_decl; } if (!decl_ok(decl)) return decl; - assert(decl); + ASSERT0(decl); return decl; CONTRACT_NOT_ALLOWED: RETURN_PRINT_ERROR_AT(poisoned_decl, astptr(contracts), "Contracts are only used for modules, functions and macros."); diff --git a/src/compiler/parse_stmt.c b/src/compiler/parse_stmt.c index 58a9720f6..2e98fac2d 100644 --- a/src/compiler/parse_stmt.c +++ b/src/compiler/parse_stmt.c @@ -53,7 +53,7 @@ static Ast *parse_decl_stmt_after_type(ParseContext *c, TypeInfo *type) } if (decl->attributes) { - assert(VECLAST(decl->attributes)); + ASSERT0(VECLAST(decl->attributes)); PRINT_ERROR_AT(VECLAST(decl->attributes), "Multiple variable declarations must have attributes at the end."); return poisoned_ast; } @@ -76,7 +76,7 @@ static Ast *parse_decl_stmt_after_type(ParseContext *c, TypeInfo *type) { if (tok_is(c, TOKEN_COMMA)) { - assert(VECLAST(decl->attributes)); + ASSERT0(VECLAST(decl->attributes)); PRINT_ERROR_AT(VECLAST(decl->attributes), "Multiple variable declarations must have attributes at the end."); return poisoned_ast; } @@ -171,7 +171,7 @@ static inline bool parse_asm_offset(ParseContext *c, ExprAsmArg *asm_arg) return false; } Expr *offset = parse_integer(c, NULL); - assert(expr_is_const_int(offset)); + ASSERT0(expr_is_const_int(offset)); Int i = offset->const_expr.ixx; if (i.i.high) { @@ -190,7 +190,7 @@ static inline bool parse_asm_scale(ParseContext *c, ExprAsmArg *asm_arg) return false; } Expr *value = parse_integer(c, NULL); - assert(expr_is_const_int(value)); + ASSERT0(expr_is_const_int(value)); Int i = value->const_expr.ixx; if (i.i.high) { diff --git a/src/compiler/sema_asm.c b/src/compiler/sema_asm.c index d9ec5227d..07de529d3 100644 --- a/src/compiler/sema_asm.c +++ b/src/compiler/sema_asm.c @@ -61,7 +61,7 @@ static inline AsmArgGroup sema_ireg_for_type(Type *type) */ static inline bool sema_reg_int_suported_type(AsmArgType arg, Type *type) { - assert(type_flatten(type) == type); + ASSERT0(type_flatten(type) == type); unsigned bits = type_bit_size(type); return next_highest_power_of_2(arg_bits_max(arg.ireg_bits, bits)) == bits; } @@ -83,7 +83,7 @@ INLINE bool sema_reg_is_valid_in_slot(AsmRegister *reg, AsmArgType arg_type) static 
inline bool sema_reg_float_suported_type(AsmArgType arg, Type *type) { - assert(type_flatten(type) == type); + ASSERT0(type_flatten(type) == type); if (!arg.float_bits) return false; return type_bit_size(type) == next_highest_power_of_2(arg_bits_max(arg.float_bits, 0)); } @@ -116,7 +116,7 @@ static inline bool sema_check_npot_imm_fits(Int imm, AsmArgType arg_type) } return true; } - assert(arg_type.imm_arg_ubits > 0); + ASSERT0(arg_type.imm_arg_ubits > 0); if (arg_type.imm_arg_ubits & ARG_BITS_20) { if (!direct_compare) return false; @@ -189,7 +189,7 @@ static inline bool sema_check_asm_arg_addr(SemaContext *context, AsmInlineBlock } ExprAsmArg *asm_arg = &expr->expr_asm_arg; Expr *base = exprptr(asm_arg->base); - assert(base->expr_kind == EXPR_ASM); + ASSERT0(base->expr_kind == EXPR_ASM); ExprAsmArg *base_arg = &base->expr_asm_arg; AsmArgType any_ireg = { .ireg_bits = (AsmArgBits)0xFF }; unsigned bit_size = 0; @@ -359,7 +359,7 @@ static inline bool sema_check_asm_var(SemaContext *context, AsmInlineBlock *bloc Decl *decl = sema_resolve_symbol(context, name, NULL, expr->span); if (!decl) return false; - assert(arg->kind == ASM_ARG_REGVAR); + ASSERT0(arg->kind == ASM_ARG_REGVAR); arg->ident.ident_decl = decl; if (decl->decl_kind != DECL_VAR) { @@ -414,7 +414,7 @@ static inline bool sema_check_asm_var(SemaContext *context, AsmInlineBlock *bloc if (!sema_reg_int_suported_type(arg_type, type)) { unsigned bits = arg_bits_max(arg_type.ireg_bits, 0); - assert(bits); + ASSERT0(bits); SEMA_ERROR(expr, "%s is not supported in this position, convert it to a valid type, like %s.", type_quoted_error_string(decl->type), type_quoted_error_string(type_int_signed_by_bitsize(bits))); return false; @@ -452,7 +452,7 @@ static inline bool sema_check_asm_memvar(SemaContext *context, AsmInlineBlock *b const char *name = arg->ident.name; Decl *decl = sema_resolve_symbol(context, name, NULL, expr->span); if (!decl) return false; - assert(arg->kind == ASM_ARG_MEMVAR); + ASSERT0(arg->kind == ASM_ARG_MEMVAR); arg->ident.ident_decl = decl; if (decl->decl_kind != DECL_VAR) { @@ -551,7 +551,7 @@ static inline bool sema_check_asm_arg(SemaContext *context, AsmInlineBlock *bloc bool sema_analyse_asm(SemaContext *context, AsmInlineBlock *block, Ast *asm_stmt) { - assert(compiler.platform.asm_initialized); + ASSERT0(compiler.platform.asm_initialized); AsmInstruction *instr = asm_instr_by_name(asm_stmt->asm_stmt.instruction); if (!instr) RETURN_SEMA_ERROR(asm_stmt, "Unknown instruction"); diff --git a/src/compiler/sema_builtins.c b/src/compiler/sema_builtins.c index cb64544a4..5924b0375 100644 --- a/src/compiler/sema_builtins.c +++ b/src/compiler/sema_builtins.c @@ -52,7 +52,7 @@ static bool sema_expr_is_valid_mask_for_value(SemaContext *context, Expr *expr, */ static bool sema_check_builtin_args_match(SemaContext *context, Expr **args, size_t arg_len) { - assert(arg_len > 1); + ASSERT0(arg_len > 1); Type *first = type_no_optional(args[0]->type->canonical); for (size_t i = 1; i < arg_len; i++) { @@ -444,7 +444,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) switch (func) { case BUILTIN_SET_ROUNDING_MODE: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTEGER}, 1)) return false; rtype = type_void; break; @@ -461,7 +461,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_VECCOMPLT: case BUILTIN_VECCOMPNE: { - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, 
args, (BuiltinArg[]) {BA_NUMVEC, BA_NUMVEC}, 2)) return false; if (!sema_check_builtin_args_match(context, args, 2)) return false; Type *vec_type = type_flatten(args[0]->type); @@ -469,7 +469,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) break; } case BUILTIN_SELECT: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_BOOLVEC, BA_VEC, BA_VEC}, 3)) return false; if (!sema_check_builtin_args_match(context, &args[1], 2)) return false; rtype = args[1]->type; @@ -478,7 +478,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_OVERFLOW_ADD: case BUILTIN_OVERFLOW_MUL: case BUILTIN_OVERFLOW_SUB: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTEGER, BA_INTEGER, BA_POINTER}, 3)) return false; @@ -496,23 +496,23 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_EXACT_MUL: case BUILTIN_EXACT_SUB: case BUILTIN_EXACT_MOD: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTEGER, BA_INTEGER}, 2)) return false; if (!sema_check_builtin_args_match(context, args, 2)) return false; rtype = args[0]->type->canonical; break; case BUILTIN_ANY_MAKE: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_TYPEID}, 2)) return false; rtype = type_any; break; case BUILTIN_EXACT_NEG: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTLIKE}, 1)) return false; rtype = args[0]->type->canonical; break; case BUILTIN_MEMCOPY_INLINE: - assert(arg_count == 6); + ASSERT0(arg_count == 6); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_POINTER, BA_SIZE, BA_BOOL, BA_SIZE, BA_SIZE}, 6)) return false; @@ -521,7 +521,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) break; case BUILTIN_MEMCOPY: case BUILTIN_MEMMOVE: - assert(arg_count == 6); + ASSERT0(arg_count == 6); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_POINTER, BA_SIZE, BA_BOOL, BA_SIZE, BA_SIZE}, 6)) return false; @@ -529,14 +529,14 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) rtype = type_void; break; case BUILTIN_MEMSET: - assert(arg_count == 5); + ASSERT0(arg_count == 5); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_CHAR, BA_SIZE, BA_BOOL, BA_SIZE}, 5)) return false; if (!sema_check_builtin_args_const(context, &args[3], 2)) return false; rtype = type_void; break; case BUILTIN_MEMSET_INLINE: - assert(arg_count == 5); + ASSERT0(arg_count == 5); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_CHAR, BA_SIZE, BA_BOOL, BA_SIZE}, 5)) return false; if (!sema_check_builtin_args_const(context, &args[2], 3)) return false; @@ -547,31 +547,31 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_CTLZ: case BUILTIN_POPCOUNT: case BUILTIN_CTTZ: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTLIKE}, 1)) return false; rtype = args[0]->type; break; case BUILTIN_SAT_SHL: case BUILTIN_SAT_SUB: case BUILTIN_SAT_ADD: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTLIKE, BA_INTLIKE}, 2)) return false; if 
(!sema_check_builtin_args_match(context, args, 2)) return false; rtype = args[0]->type; break; case BUILTIN_REVERSE: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_VEC}, 1)) return false; rtype = args[0]->type; break; case BUILTIN_EXPECT: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_BOOLINT, BA_BOOLINT}, 2)) return false; if (!sema_check_builtin_args_match(context, args, 2)) return false; rtype = args[0]->type; break; case BUILTIN_EXPECT_WITH_PROBABILITY: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_BOOLINT, BA_BOOLINT}, 2)) return false; if (!cast_implicit(context, args[2], type_double, false)) { @@ -609,32 +609,32 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_SIN: case BUILTIN_SQRT: case BUILTIN_TRUNC: - assert(arg_count); + ASSERT0(arg_count); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_FLOATLIKE, BA_FLOATLIKE, BA_FLOATLIKE}, arg_count)) return false; rtype = args[0]->type; break; case BUILTIN_FRAMEADDRESS: case BUILTIN_RETURNADDRESS: - assert(arg_count); + ASSERT0(arg_count); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTEGER}, arg_count)) return false; if (!cast_implicit(context, args[0], type_int, false)) return false; if (!expr_is_const_int(args[0])) RETURN_SEMA_ERROR(args[0], "Expected a compile time constant integer."); rtype = type_voidptr; break; case BUILTIN_WASM_MEMORY_SIZE: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!cast_implicit(context, args[0], type_uint, false)) return false; rtype = type_uptr; break; case BUILTIN_WASM_MEMORY_GROW: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!cast_implicit(context, args[0], type_uint, false)) return false; if (!cast_implicit(context, args[1], type_uptr, false)) return false; rtype = type_iptr; break; case BUILTIN_PREFETCH: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_INTEGER, BA_INTEGER}, 3)) return false; for (unsigned i = 1; i < 3; i++) { @@ -655,20 +655,20 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) rtype = type_void; break; case BUILTIN_POW: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_FLOATLIKE, BA_FLOATLIKE}, 2)) return false; if (!sema_check_builtin_args_match(context, args, 2)) return false; rtype = args[0]->type; break; case BUILTIN_POW_INT: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_FLOATLIKE, BA_INTLIKE}, 2)) return false; if (!cast_implicit(context, args[1], type_cint, false)) return false; rtype = args[0]->type; break; case BUILTIN_REDUCE_FMUL: case BUILTIN_REDUCE_FADD: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_FLOATVEC, BA_FLOAT}, 2)) return false; if (!cast_implicit(context, args[1], args[0]->type->canonical->array.base, false)) return false; { @@ -681,7 +681,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_REDUCE_MAX: case BUILTIN_REDUCE_MIN: { - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_NUMVEC}, 1)) return false; rtype = type_get_indexed_type(args[0]->type); break; @@ -691,18 
+691,18 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_REDUCE_OR: case BUILTIN_REDUCE_XOR: case BUILTIN_REDUCE_MUL: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_BOOLINTVEC}, 1)) return false; rtype = type_get_indexed_type(args[0]->type); break; case BUILTIN_ABS: - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_NUMLIKE}, 1)) return false; rtype = args[0]->type; break; case BUILTIN_GATHER: { - assert(arg_count == 4); + ASSERT0(arg_count == 4); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_PTRVEC, BA_BOOLVEC, BA_VEC, BA_INTEGER}, 4)) return false; Type *flat_pointer_vec = type_flatten(args[0]->type); Type *flat_passthru_vec = type_flatten(args[2]->type); @@ -726,7 +726,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_SCATTER: { - assert(arg_count == 4); + ASSERT0(arg_count == 4); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_PTRVEC, BA_VEC, BA_BOOLVEC, BA_INTEGER}, 4)) return false; Type *flat_pointer_vec = type_flatten(args[0]->type); Type *flat_value_vec = type_flatten(args[1]->type); @@ -749,7 +749,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_MASKED_LOAD: { - assert(arg_count == 4); + ASSERT0(arg_count == 4); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_BOOLVEC, BA_VEC, BA_INTEGER}, 4)) return false; Type *pointer_type = args[0]->type; if (!type_is_pointer(pointer_type)) RETURN_SEMA_ERROR(args[0], "Expected a direct pointer."); @@ -764,7 +764,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_MASKED_STORE: { - assert(arg_count == 4); + ASSERT0(arg_count == 4); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_VEC, BA_BOOLVEC, BA_INTEGER}, 4)) return false; Type *pointer_type = args[0]->type; if (!type_is_pointer(pointer_type)) RETURN_SEMA_ERROR(args[0], "Expected a direct pointer."); @@ -779,13 +779,13 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_MAX: case BUILTIN_MIN: - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_NUMLIKE, BA_NUMLIKE}, 2)) return false; if (!sema_check_builtin_args_match(context, args, 2)) return false; rtype = args[0]->type; break; case BUILTIN_FMA: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_FLOATLIKE, BA_FLOATLIKE, BA_FLOATLIKE}, 3)) return false; @@ -794,14 +794,14 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) break; case BUILTIN_FSHL: case BUILTIN_FSHR: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_INTLIKE, BA_INTLIKE, BA_INTLIKE}, 3)) return false; if (!sema_check_builtin_args_match(context, args, 3)) return false; rtype = args[0]->type; break; case BUILTIN_FMULADD: - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_FLOAT, BA_FLOAT, BA_FLOAT}, 3)) return false; if (!sema_check_builtin_args_match(context, args, 3)) return false; @@ -809,7 +809,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) break; case BUILTIN_ATOMIC_LOAD: { - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, 
args, (BuiltinArg[]) {BA_POINTER, BA_BOOL, BA_INTEGER}, 3)) return false; Type *original = type_flatten(args[0]->type); if (original == type_voidptr) RETURN_SEMA_ERROR(args[0], "Expected a typed pointer."); @@ -827,7 +827,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_UNALIGNED_LOAD: { - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_INTEGER}, 2)) return false; Type *original = type_flatten(args[0]->type); if (original == type_voidptr) RETURN_SEMA_ERROR(args[0], "Expected a typed pointer."); @@ -837,7 +837,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_UNALIGNED_STORE: { - assert(arg_count == 3); + ASSERT0(arg_count == 3); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER}, 1)) return false; if (!sema_check_builtin_args(context, &args[2], (BuiltinArg[]) {BA_INTEGER}, 1)) return false; Type *original = type_flatten(args[0]->type); @@ -851,7 +851,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_VOLATILE_LOAD: { - assert(arg_count == 1); + ASSERT0(arg_count == 1); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER}, 1)) return false; Type *original = type_flatten(args[0]->type); if (original == type_voidptr) RETURN_SEMA_ERROR(args[0], "Expected a typed pointer."); @@ -860,7 +860,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_VOLATILE_STORE: { - assert(arg_count == 2); + ASSERT0(arg_count == 2); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER}, 1)) return false; Type *original = type_flatten(args[0]->type); if (original != type_voidptr) @@ -873,7 +873,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_ATOMIC_FETCH_INC_WRAP: case BUILTIN_ATOMIC_FETCH_DEC_WRAP: { - assert(arg_count == 5); + ASSERT0(arg_count == 5); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_INTEGER}, 2)) return false; Type *original = type_flatten(args[0]->type); Type *val = type_flatten(args[1]->type); @@ -907,7 +907,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_ATOMIC_FETCH_OR: case BUILTIN_ATOMIC_FETCH_XOR: { - assert(arg_count == 5); + ASSERT0(arg_count == 5); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_INTEGER}, 2)) return false; Type *original = type_flatten(args[0]->type); if (original != type_voidptr) @@ -928,7 +928,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_ATOMIC_FETCH_EXCHANGE: { - assert(arg_count == 5); + ASSERT0(arg_count == 5); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER}, 1)) return false; Type *original = type_flatten(args[0]->type); if (original != type_voidptr) @@ -954,7 +954,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) case BUILTIN_ATOMIC_FETCH_MAX: case BUILTIN_ATOMIC_FETCH_MIN: { - assert(arg_count == 5); + ASSERT0(arg_count == 5); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER, BA_NUM}, 2)) return false; Type *original = type_flatten(args[0]->type); if (original != type_voidptr) @@ -975,7 +975,7 @@ bool sema_expr_analyse_builtin_call(SemaContext *context, Expr *expr) } case BUILTIN_ATOMIC_STORE: { - assert(arg_count == 4); + ASSERT0(arg_count == 4); if (!sema_check_builtin_args(context, args, (BuiltinArg[]) {BA_POINTER}, 1)) return 
false; if (!sema_check_builtin_args(context, &args[2], (BuiltinArg[]) {BA_BOOL, BA_INTEGER}, 2)) return false; Type *original = type_flatten(args[0]->type); diff --git a/src/compiler/sema_casts.c b/src/compiler/sema_casts.c index 65a24938c..0ca409e30 100644 --- a/src/compiler/sema_casts.c +++ b/src/compiler/sema_casts.c @@ -97,7 +97,7 @@ bool may_cast(SemaContext *context, Expr *expr, Type *to_type, bool is_explicit, static bool cast_is_allowed(CastContext *cc, bool is_explicit, bool is_silent) { Type *from_type = cc->from; - assert(from_type == from_type->canonical); + ASSERT0(from_type == from_type->canonical); // Check simple equality. from_type = from_type->canonical; if (from_type == cc->to) return true; @@ -154,7 +154,7 @@ void cast_to_int_to_max_bit_size(SemaContext *context, Expr *lhs, Expr *rhs, Typ unsigned bit_size_left = left_type->builtin.bitsize; unsigned bit_size_right = right_type->builtin.bitsize; - assert(bit_size_left && bit_size_right); + ASSERT0(bit_size_left && bit_size_right); // Simple case they are the same size, just return. if (bit_size_left == bit_size_right) return; @@ -245,7 +245,7 @@ Type *type_infer_len_from_actual_type(Type *to_infer, Type *actual_type) Type *actual = type_get_indexed_type(actual_type); // We should always have indexed types. - assert(indexed && actual); + ASSERT0(indexed && actual); // The underlying type may also be inferred. // In this case, infer it. @@ -265,10 +265,10 @@ Type *type_infer_len_from_actual_type(Type *to_infer, Type *actual_type) // The case of int[*][2] x = ... return type_add_optional(type_get_array(indexed, to_infer->array.len), is_optional); case TYPE_INFERRED_ARRAY: - assert(type_is_arraylike(type_flatten(actual_type))); + ASSERT0(type_is_arraylike(type_flatten(actual_type))); return type_add_optional(type_get_array(indexed, type_flatten(actual_type)->array.len), is_optional); case TYPE_INFERRED_VECTOR: - assert(type_is_arraylike(type_flatten(actual_type))); + ASSERT0(type_is_arraylike(type_flatten(actual_type))); return type_add_optional(type_get_vector(indexed, type_flatten(actual_type)->array.len), is_optional); case TYPE_SLICE: return type_add_optional(type_get_slice(indexed), is_optional); @@ -510,15 +510,15 @@ Expr *recursive_may_narrow(Expr *expr, Type *type) // For constants, just check that they will fit. 
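Nearly every hunk in this patch makes the same mechanical substitution: the libc `assert` is swapped for the project's `ASSERT0` macro, while the asserted condition (and any message attached with `&&`) is left untouched. The real definition of `ASSERT0` lives elsewhere in the c3c sources and is not shown in this diff; the sketch below is only a plausible stand-in showing the usual shape of such a wrapper, i.e. an assert that reports the failing expression and location and can be compiled out.

```c
/* Hypothetical stand-in for ASSERT0; the real macro in the c3c tree may
 * differ (extra diagnostics, a different opt-out switch, etc.). */
#include <stdio.h>
#include <stdlib.h>

#if defined(NDEBUG)
	#define ASSERT0(cond_) ((void)0)  /* stripped when assertions are disabled */
#else
	#define ASSERT0(cond_) \
		do { \
			if (!(cond_)) \
			{ \
				fprintf(stderr, "Assertion failed: %s (%s:%d)\n", #cond_, __FILE__, __LINE__); \
				abort(); \
			} \
		} while (0)
#endif

int main(void)
{
	int arg_count = 2;
	ASSERT0(arg_count == 2);                      /* passes silently */
	ASSERT0(arg_count == 2 && "with a message");  /* message idiom used throughout the diff */
	return 0;
}
```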
if (type_is_integer(type)) { - assert(expr->const_expr.const_kind == CONST_INTEGER || expr->const_expr.const_kind == CONST_ENUM); + ASSERT0(expr->const_expr.const_kind == CONST_INTEGER || expr->const_expr.const_kind == CONST_ENUM); if (expr_const_will_overflow(&expr->const_expr, type_flatten(type)->type_kind)) { return expr; } return NULL; } - assert(type_is_float(type)); - assert(expr->const_expr.const_kind == CONST_FLOAT); + ASSERT0(type_is_float(type)); + ASSERT0(expr->const_expr.const_kind == CONST_FLOAT); if (!expr_const_float_fits_type(&expr->const_expr, type_flatten(type)->type_kind)) { return expr; @@ -567,7 +567,7 @@ Expr *recursive_may_narrow(Expr *expr, Type *type) static bool sema_error_const_int_out_of_range(CastContext *cc, Expr *expr, Expr *problem, Type *to_type) { - assert(expr_is_const(expr)); + ASSERT0(expr_is_const(expr)); if (expr->const_expr.is_character && expr->type->type_kind != TYPE_U128) { RETURN_CAST_ERROR(problem, "The unicode character U+%04x cannot fit in a %s.", (uint32_t)expr->const_expr.ixx.i.low, type_quoted_error_string(to_type)); @@ -743,7 +743,7 @@ static bool rule_ptr_to_ptr(CastContext *cc, bool is_explicit, bool is_silent) return false; case TYPE_ALIGNMENT_INCREASE: if (is_explicit) return false; - assert(!is_explicit); + ASSERT0(!is_explicit); RETURN_CAST_ERROR(cc->expr, "Implicitly casting %s (alignment %d) to %s (alignment %d) is not permitted, " "it would require an explicit cast. Before using an explicit cast, please make " @@ -852,7 +852,7 @@ static bool rule_arrptr_to_slice(CastContext *cc, bool is_explicit, bool is_sile static bool rule_ulist_to_struct(CastContext *cc, bool is_explicit, bool is_silent) { - assert(expr_is_const_untyped_list(cc->expr)); + ASSERT0(expr_is_const_untyped_list(cc->expr)); Expr **expressions = cc->expr->const_expr.untyped_list; unsigned size = vec_size(expressions); if (!size) return true; @@ -895,7 +895,7 @@ static bool rule_ulist_to_vecarr(CastContext *cc, bool is_explicit, bool is_sile static bool rule_ulist_to_slice(CastContext *cc, bool is_explicit, bool is_silent) { - assert(expr_is_const_untyped_list(cc->expr)); + ASSERT0(expr_is_const_untyped_list(cc->expr)); Type *base = cc->to->array.base; FOREACH(Expr *, expr, cc->expr->const_expr.untyped_list) { @@ -949,53 +949,34 @@ static bool rule_slice_to_slice(CastContext *cc, bool is_explicit, bool is_silen Type *from_base = from_type->array.base; Type *array_base = cc->to->array.base; - if (is_explicit) - { - array_base = type_flatten(array_base); - from_base = type_flatten(from_base); - } // Same base type? 
Ok - if (from_base == array_base) return true; - - // This is allowed: void*[] -> int*[] and int*[] -> void*[] - if ((from_base == type_voidptr && type_is_pointer_type(array_base)) || (array_base == type_voidptr && type_is_pointer_type(from_base))) return true; - - if (is_silent) return false; + if (from_base->canonical == array_base->canonical) return true; - // Allow converting to any type with the same size (and a smaller or same alignment) - if (type_size(array_base) != type_size(from_base)) + switch (type_array_element_is_equivalent(cc->context, array_base, from_base, is_explicit)) { - if (!is_explicit) return sema_cast_error(cc, false, is_silent); - if (is_silent) return false; - RETURN_CAST_ERROR(cc->expr, "%s cannot be cast to %s as its elements have different size.", - type_quoted_error_string(cc->expr->type), type_quoted_error_string(cc->to_type)); - } - if (type_abi_alignment(from_base) < type_abi_alignment(array_base)) - { - if (!is_explicit) return sema_cast_error(cc, false, is_silent); - if (is_silent) return false; - RETURN_CAST_ERROR(cc->expr, - "%s cannot be cast to %s as its elements has a greater default alignment, but you can use a bitcast.", - type_quoted_error_string(cc->expr->type), type_quoted_error_string(cc->to_type)); + case TYPE_ERROR: + return false; + case TYPE_MISMATCH: + case TYPE_ALIGNMENT_INCREASE: + if (is_silent) return false; + return sema_cast_error(cc, is_explicit ? false : rule_slice_to_slice(cc, true, true), is_silent); + case TYPE_SAME: + case TYPE_SAME_INT_SIZE: + return true; } - if (!is_explicit) return sema_cast_error(cc, true, is_silent); - return true; + UNREACHABLE } static bool rule_arr_to_arr(CastContext *cc, bool is_explicit, bool is_silent) { + if (!rule_slice_to_slice(cc, is_explicit, is_silent)) return false; if (type_flatten(cc->to)->array.len != type_flatten(cc->from)->array.len) { if (is_silent) return false; RETURN_CAST_ERROR(cc->expr, "Arrays of different lengths may not be converted."); } - if (type_size(cc->from) != type_size(cc->to)) - { - if (is_silent) return false; - RETURN_CAST_ERROR(cc->expr, "Arrays of different size may not be converted."); - } - return rule_slice_to_slice(cc, is_explicit, is_silent); + return true; } static bool rule_arr_to_vec(CastContext *cc, bool is_explicit, bool is_silent) @@ -1122,7 +1103,7 @@ RETRY:; } else { - assert(decl->decl_kind == DECL_STRUCT); + ASSERT0(decl->decl_kind == DECL_STRUCT); inner = decl->strukt.members[0]->type->canonical; } if (!type_may_implement_interface(inner)) return false; @@ -1275,7 +1256,7 @@ static bool rule_not_applicable(CastContext *cc, bool is_explicit, bool is_silen static bool rule_from_distinct(CastContext *cc, bool is_explicit, bool is_silent) { Type *from_type = cc->from; - assert(from_type == from_type->canonical); + ASSERT0(from_type == from_type->canonical); // Explicit just flattens and tries again. 
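The largest non-mechanical change above is the rewrite of `rule_slice_to_slice` and `rule_arr_to_arr`: the hand-rolled size, alignment and void-pointer checks are dropped, and element compatibility is delegated to `type_array_element_is_equivalent`, whose result is mapped onto accept/reject through a switch. The sketch below is a self-contained mock of that decision table; the enum constants mirror the ones used in the hunk, while the surrounding types and helper are invented scaffolding for illustration only.

```c
/* Mock of the new slice/array cast decision flow. Only the enum constant
 * names come from the hunk above; everything else is illustrative. */
#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	TYPE_ERROR,              /* analysis failed, propagate the error    */
	TYPE_MISMATCH,           /* elements are not equivalent             */
	TYPE_ALIGNMENT_INCREASE, /* compatible size but stricter alignment  */
	TYPE_SAME,               /* identical element type                  */
	TYPE_SAME_INT_SIZE,      /* different but equivalently sized        */
} ElementCmp;

/* Stand-in for type_array_element_is_equivalent(). */
static ElementCmp element_compare(int from_elem, int to_elem, bool is_explicit)
{
	(void)is_explicit;
	return from_elem == to_elem ? TYPE_SAME : TYPE_MISMATCH;
}

/* Mirrors the shape of rule_slice_to_slice(): identical canonical element
 * types are accepted immediately, otherwise the comparison result decides. */
static bool slice_cast_allowed(int from_elem, int to_elem, bool is_explicit)
{
	if (from_elem == to_elem) return true;
	switch (element_compare(from_elem, to_elem, is_explicit))
	{
		case TYPE_ERROR:
		case TYPE_MISMATCH:
		case TYPE_ALIGNMENT_INCREASE:
			return false;        /* the real rule also emits a diagnostic here */
		case TYPE_SAME:
		case TYPE_SAME_INT_SIZE:
			return true;
	}
	return false;
}

int main(void)
{
	printf("same element type     : %d\n", slice_cast_allowed(1, 1, false));
	printf("mismatched element type: %d\n", slice_cast_allowed(1, 2, true));
	return 0;
}
```

Note that `rule_arr_to_arr` now reuses the same element check by calling `rule_slice_to_slice` first and only adds the extra requirement that the two array lengths match.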
if (is_explicit) @@ -1295,7 +1276,7 @@ static bool rule_from_distinct(CastContext *cc, bool is_explicit, bool is_silent static bool rule_to_distinct(CastContext *cc, bool is_explicit, bool is_silent) { Type *from_type = cc->from; - assert(from_type == from_type->canonical); + ASSERT0(from_type == from_type->canonical); Type *flat = type_flatten(cc->to); ConvGroup flat_group = type_to_group(flat); Expr *expr = cc->expr; @@ -1468,8 +1449,8 @@ static bool rule_bits_to_int(CastContext *cc, bool is_explicit, bool is_silent) */ static inline bool insert_runtime_cast(Expr *expr, CastKind kind, Type *type) { - assert(expr->resolve_status == RESOLVE_DONE); - assert(expr->type); + ASSERT0(expr->resolve_status == RESOLVE_DONE); + ASSERT0(expr->type); Expr *inner = expr_copy(expr); expr->expr_kind = EXPR_CAST; expr->cast_expr.kind = kind; @@ -1581,7 +1562,7 @@ static void cast_ptr_to_ptr(SemaContext *context, Expr *expr, Type *type) static void cast_float_to_float(SemaContext *context, Expr *expr, Type *type) { // Change to same type should never enter here. - assert(type_flatten(type) != type_flatten(expr->type)); + ASSERT0(type_flatten(type) != type_flatten(expr->type)); // Insert runtime cast if needed. if (insert_runtime_cast_unless_const(expr, CAST_FPFP, type)) return; @@ -1615,14 +1596,14 @@ static void cast_float_to_int(SemaContext *context, Expr *expr, Type *type) static void cast_int_to_enum(SemaContext *context, Expr *expr, Type *type) { Type *canonical = type_flatten(type); - assert(canonical->type_kind == TYPE_ENUM); + ASSERT0(canonical->type_kind == TYPE_ENUM); if (insert_runtime_cast_unless_const(expr, CAST_INTENUM, type)) return; Decl *enum_decl = canonical->decl; // Fold the const into the actual enum. // Check is already done. Decl *decl = enum_decl->enums.values[expr->const_expr.ixx.i.low]; - assert(decl->resolve_status == RESOLVE_DONE); + ASSERT0(decl->resolve_status == RESOLVE_DONE); expr->const_expr = (ExprConst) { .enum_err_val = decl, .const_kind = CONST_ENUM @@ -1652,7 +1633,7 @@ static inline Type *type_flatten_to_int(Type *type) case TYPE_TYPEDEF: UNREACHABLE default: - assert(type_is_integer(type)); + ASSERT0(type_is_integer(type)); return type; } } @@ -1713,7 +1694,7 @@ static void cast_bitstruct_to_bool(SemaContext *context, Expr *expr, Type *type) expr_rewrite_const_bool(expr, type, false); return; } - assert(expr->const_expr.initializer->kind == CONST_INIT_STRUCT); + ASSERT0(expr->const_expr.initializer->kind == CONST_INIT_STRUCT); FOREACH(ConstInitializer *, in, expr->const_expr.initializer->init_struct) { if (in->kind == CONST_INIT_ZERO) continue; @@ -1767,11 +1748,11 @@ static void cast_int_to_float(SemaContext *context, Expr *expr, Type *type) static void cast_enum_to_int(SemaContext *context, Expr* expr, Type *to_type) { - assert(type_flatten(expr->type)->type_kind == TYPE_ENUM); + ASSERT0(type_flatten(expr->type)->type_kind == TYPE_ENUM); Type *underlying_type = type_base(expr->type); if (sema_cast_const(expr)) { - assert(expr->const_expr.const_kind == CONST_ENUM); + ASSERT0(expr->const_expr.const_kind == CONST_ENUM); expr_rewrite_const_int(expr, underlying_type, expr->const_expr.enum_err_val->enum_constant.ordinal); } if (expr->expr_kind == EXPR_CAST && expr->cast_expr.kind == CAST_INTENUM) @@ -1790,7 +1771,7 @@ static void cast_vec_to_arr(SemaContext *context, Expr *expr, Type *to_type) { if (insert_runtime_cast_unless_const(expr, CAST_VECARR, to_type)) return; - assert(expr->const_expr.const_kind == CONST_INITIALIZER); + ASSERT0(expr->const_expr.const_kind == 
CONST_INITIALIZER); ConstInitializer *list = expr->const_expr.initializer; list->type = type_flatten(to_type); expr->type = to_type; @@ -1893,7 +1874,7 @@ static void cast_vec_to_vec(SemaContext *context, Expr *expr, Type *to_type) } } - assert(expr->const_expr.const_kind == CONST_INITIALIZER); + ASSERT0(expr->const_expr.const_kind == CONST_INITIALIZER); // For the const initializer we need to change the internal type ConstInitializer *list = expr->const_expr.initializer; @@ -1904,12 +1885,12 @@ static void cast_vec_to_vec(SemaContext *context, Expr *expr, Type *to_type) static void cast_untyped_list_to_other(SemaContext *context, Expr *expr, Type *to_type) { - assert(expr_is_const_untyped_list(expr)); + ASSERT0(expr_is_const_untyped_list(expr)); // Recursively set the type of all ConstInitializer inside. expr_recursively_rewrite_untyped_list(expr, expr->const_expr.untyped_list); // We can now analyse the list (this is where the actual check happens) bool success = sema_expr_analyse_initializer_list(context, type_flatten(to_type), expr); - assert(success); + ASSERT0(success); // And set the type. expr->type = type_infer_len_from_actual_type(to_type, expr->type); } @@ -1918,7 +1899,7 @@ static void cast_anyfault_to_fault(SemaContext *context, Expr *expr, Type *type) { if (insert_runtime_cast_unless_const(expr, CAST_EUER, type) || !expr_is_const_fault(expr)) return; Decl *value = expr->const_expr.enum_err_val; - assert(value->type != type); + ASSERT0(value->type != type); expr->type = type; } @@ -1938,7 +1919,7 @@ static void cast_slice_to_ptr(SemaContext *context, Expr *expr, Type *type) */ static void cast_int_to_ptr(SemaContext *context, Expr *expr, Type *type) { - assert(type_bit_size(type_uptr) <= 64 && "For > 64 bit pointers, this code needs updating."); + ASSERT0(type_bit_size(type_uptr) <= 64 && "For > 64 bit pointers, this code needs updating."); // Handle const: if (sema_cast_const(expr)) @@ -1973,7 +1954,7 @@ static void cast_bool_to_float(SemaContext *context, Expr *expr, Type *type) { if (insert_runtime_cast_unless_const(expr, CAST_BOOLFP, type)) return; - assert(expr->const_expr.const_kind == CONST_BOOL); + ASSERT0(expr->const_expr.const_kind == CONST_BOOL); expr_rewrite_const_float(expr, type, expr->const_expr.b ? 1.0 : 0.0); } @@ -2035,7 +2016,7 @@ static void cast_ptr_to_bool(SemaContext *context, Expr *expr, Type *type) // Or it's a string, in which case it is always true. 
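A pattern recurs in the `cast_*_to_*` helpers touched here: each routine first calls `insert_runtime_cast_unless_const`; if the operand is not a compile-time constant, a runtime cast node is inserted and the routine is done, otherwise the constant is folded in place (for example, `cast_bool_to_float` rewrites a constant bool to `1.0` or `0.0`). Below is a minimal sketch of that fold-or-defer structure; the `MockExpr` type and its fields are hypothetical stand-ins, not the real c3c data structures.

```c
/* Minimal sketch of "fold if constant, otherwise defer to a runtime cast". */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	bool is_const;
	bool bool_value;           /* valid when is_const                          */
	double float_value;        /* result slot after folding                    */
	const char *runtime_cast;  /* non-NULL once a runtime cast was "inserted"  */
} MockExpr;

/* Returns true when a runtime cast was inserted (nothing left to fold). */
static bool insert_runtime_cast_unless_const(MockExpr *expr, const char *kind)
{
	if (expr->is_const) return false;
	expr->runtime_cast = kind;
	return true;
}

static void cast_bool_to_float(MockExpr *expr)
{
	if (insert_runtime_cast_unless_const(expr, "CAST_BOOLFP")) return;
	/* Constant operand: fold directly, mirroring expr_rewrite_const_float(). */
	expr->float_value = expr->bool_value ? 1.0 : 0.0;
}

int main(void)
{
	MockExpr c = { .is_const = true, .bool_value = true };
	MockExpr r = { .is_const = false };
	cast_bool_to_float(&c);
	cast_bool_to_float(&r);
	printf("const fold: %.1f, runtime cast kind: %s\n", c.float_value, r.runtime_cast);
	return 0;
}
```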
- assert(expr->const_expr.const_kind == CONST_STRING); + ASSERT0(expr->const_expr.const_kind == CONST_STRING); expr_rewrite_const_bool(expr, type, true); } @@ -2073,7 +2054,7 @@ static void cast_vecarr_to_slice(SemaContext *context, Expr *expr, Type *to_type { UNREACHABLE } - assert(expr_is_const(expr)); + ASSERT0(expr_is_const(expr)); switch (expr->const_expr.const_kind) { case CONST_FLOAT: @@ -2122,14 +2103,14 @@ static void cast_slice_to_vecarr(SemaContext *context, Expr *expr, Type *to_type default: UNREACHABLE; } - assert(expr->expr_kind == EXPR_CAST); + ASSERT0(expr->expr_kind == EXPR_CAST); return; } if (expr_is_const_slice(expr)) { expr->const_expr.const_kind = CONST_INITIALIZER; } - assert(expr_is_const(expr)); + ASSERT0(expr_is_const(expr)); expr->type = to_type; return; } @@ -2137,7 +2118,7 @@ static void cast_slice_to_vecarr(SemaContext *context, Expr *expr, Type *to_type static void cast_slice_to_infer(SemaContext *context, Expr *expr, Type *to_type) { ArraySize len = sema_len_from_const(expr); - assert(len > 0); + ASSERT0(len > 0); Type *indexed = type_get_indexed_type(expr->type); to_type = type_infer_len_from_actual_type(to_type, type_get_array(indexed, len)); cast_no_check(context, expr, to_type, false); @@ -2168,7 +2149,7 @@ static void cast_arr_to_vec(SemaContext *context, Expr *expr, Type *to_type) if (sema_cast_const(expr)) { // For the array -> vector this is always a simple rewrite of type. - assert(expr->const_expr.const_kind == CONST_INITIALIZER); + ASSERT0(expr->const_expr.const_kind == CONST_INITIALIZER); ConstInitializer *list = expr->const_expr.initializer; list->type = type_flatten(to_temp); expr->type = to_temp; @@ -2185,7 +2166,7 @@ static void cast_arr_to_vec(SemaContext *context, Expr *expr, Type *to_type) static void cast_arr_to_arr(SemaContext *context, Expr *expr, Type *to_type) { - assert(type_size(to_type) == type_size(expr->type)); + ASSERT0(type_size(to_type) == type_size(expr->type)); expr->type = to_type; } @@ -2193,7 +2174,7 @@ static void cast_anyfault_to_bool(SemaContext *context, Expr *expr, Type *to_typ { if (insert_runtime_cast_unless_const(expr, CAST_EUBOOL, to_type)) return; - assert(expr->const_expr.const_kind == CONST_ERR); + ASSERT0(expr->const_expr.const_kind == CONST_ERR); expr_rewrite_const_bool(expr, type_bool, expr->const_expr.enum_err_val != NULL); } @@ -2201,7 +2182,7 @@ static void cast_typeid_to_bool(SemaContext *context, Expr *expr, Type *to_type) { if (insert_runtime_cast_unless_const(expr, CAST_IDBOOL, to_type)) return; - assert(expr->const_expr.const_kind == CONST_TYPEID); + ASSERT0(expr->const_expr.const_kind == CONST_TYPEID); expr_rewrite_const_bool(expr, type_bool, expr->const_expr.typeid != NULL); } diff --git a/src/compiler/sema_const.c b/src/compiler/sema_const.c index 6c883c87c..515fc7e14 100644 --- a/src/compiler/sema_const.c +++ b/src/compiler/sema_const.c @@ -65,7 +65,7 @@ static inline bool sema_expr_const_append(SemaContext *context, Expr *append_exp { case CONST_INITIALIZER: case CONST_SLICE: - assert(list->type != type_untypedlist); + ASSERT0(list->type != type_untypedlist); return sema_append_const_array_one(context, append_expr, list, element); case CONST_UNTYPED_LIST: untyped_list = list->const_expr.untyped_list; @@ -199,7 +199,7 @@ static bool sema_concat_bytes_and_other(SemaContext *context, Expr *expr, Expr * static bool sema_append_concat_const_bytes(SemaContext *context, Expr *expr, Expr *list, Expr *element, bool is_append) { Type *indexed = type_get_indexed_type(list->type); - assert(indexed && "This 
should always work"); + ASSERT0(indexed && "This should always work"); if (is_append && !cast_implicit(context, element, indexed, false)) return false; size_t str_len = list->const_expr.bytes.len; size_t element_len = is_append ? 1 : element->const_expr.bytes.len; @@ -243,7 +243,7 @@ static bool sema_append_const_array_one(SemaContext *context, Expr *expr, Expr * return true; } bool is_slice = list->const_expr.const_kind == CONST_SLICE; - assert(!type_is_inferred(array_type)); + ASSERT0(!type_is_inferred(array_type)); bool is_vector = array_type->type_kind == TYPE_VECTOR; ConstInitializer *init = is_slice ? list->const_expr.slice_init : list->const_expr.initializer; unsigned len = sema_len_from_const(list) + 1; @@ -301,7 +301,7 @@ static bool sema_append_const_array_one(SemaContext *context, Expr *expr, Expr * */ bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr *left, Expr *right) { - assert(concat_expr->resolve_status == RESOLVE_RUNNING); + ASSERT0(concat_expr->resolve_status == RESOLVE_RUNNING); bool join_single = false; ArraySize len = 0; bool use_array = true; @@ -409,7 +409,7 @@ bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr * ConstInitializer *init = single_expr->const_expr.initializer; if (init->kind != CONST_INIT_ARRAY_FULL) { - assert(init->type != type_untypedlist); + ASSERT0(init->type != type_untypedlist); RETURN_SEMA_ERROR(single_expr, "Expected a full array here."); } FOREACH(ConstInitializer *, val, init->init_array_full) @@ -431,7 +431,7 @@ bool sema_expr_analyse_ct_concat(SemaContext *context, Expr *concat_expr, Expr * for (int i = 0; i < 2; i++) { Expr *element = exprs[i]; - assert(element->const_expr.const_kind == CONST_INITIALIZER); + ASSERT0(element->const_expr.const_kind == CONST_INITIALIZER); ConstInitType init_type = element->const_expr.initializer->kind; switch (init_type) { diff --git a/src/compiler/sema_decls.c b/src/compiler/sema_decls.c index c60ec7c6b..485edb548 100755 --- a/src/compiler/sema_decls.c +++ b/src/compiler/sema_decls.c @@ -189,7 +189,7 @@ static inline bool sema_analyse_struct_member(SemaContext *context, Decl *parent if (decl->resolve_status == RESOLVE_RUNNING) RETURN_SEMA_ERROR(decl, "Circular dependency resolving member."); // Mark the unit, it should not have been assigned at this point. - assert(!decl->unit || decl->unit->module->is_generic || decl->unit == parent->unit); + ASSERT0(!decl->unit || decl->unit->module->is_generic || decl->unit == parent->unit); decl->unit = parent->unit; // Pick the domain for attribute analysis. @@ -236,10 +236,10 @@ static inline bool sema_analyse_struct_member(SemaContext *context, Decl *parent { case DECL_VAR: { - assert(decl->var.kind == VARDECL_MEMBER); + ASSERT0(decl->var.kind == VARDECL_MEMBER); decl->resolve_status = RESOLVE_RUNNING; // Inferred types are not strictly allowed, but we use the int[*] for the flexible array member. 
- assert(type_infoptrzero(decl->var.type_info)); + ASSERT0(type_infoptrzero(decl->var.type_info)); TypeInfo *type_info = type_infoptr(decl->var.type_info); if (!sema_resolve_type_info(context, type_info, RESOLVE_TYPE_ALLOW_FLEXIBLE)) return decl_poison(decl); Type *type = type_info->type; @@ -279,7 +279,7 @@ static inline bool sema_check_struct_holes(SemaContext *context, Decl *decl, Dec { Type* member_type = type_flatten(member->type); if (!type_is_union_or_strukt(member_type)) return true; - assert(decl_is_struct_type(member_type->decl)); + ASSERT0(decl_is_struct_type(member_type->decl)); if (!member_type->decl->strukt.padded_decl_id) return true; if (!decl->strukt.padded_decl_id) decl->strukt.padded_decl_id = member_type->decl->strukt.padded_decl_id; if (decl->attr_compact) @@ -312,7 +312,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) bool has_named_parameter = false; Decl **members = decl->strukt.members; unsigned member_count = vec_size(members); - assert(member_count > 0); + ASSERT0(member_count > 0); // Check all members for (unsigned i = 0; i < member_count; i++) @@ -351,7 +351,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) if (!sema_check_struct_holes(context, decl, member)) return false; ByteSize member_size = type_size(member->type); - assert(member_size <= MAX_TYPE_SIZE); + ASSERT0(member_size <= MAX_TYPE_SIZE); // Update max alignment if (member->alignment > member_alignment) member_alignment = member->alignment; if (member_alignment > max_alignment) @@ -375,7 +375,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) member->offset = 0; } - assert(decl_ok(decl)); + ASSERT0(decl_ok(decl)); // 1. If packed, then the alignment is zero, unless previously given if (decl->is_packed && !decl->alignment) decl->alignment = 1; @@ -387,7 +387,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) decl->is_packed = decl->is_packed && max_alignment > 1; // "Representative" type is the one with the maximum alignment. - assert(max_alignment_element >= 0); + ASSERT0(max_alignment_element >= 0); decl->strukt.union_rep = max_alignment_element; // All members share the same alignment @@ -396,7 +396,7 @@ static bool sema_analyse_union_members(SemaContext *context, Decl *decl) member->alignment = decl->alignment; } - assert(max_size); + ASSERT0(max_size); // The actual size might be larger than the max size due to alignment. AlignSize size = aligned_offset(max_size, decl->alignment); @@ -488,7 +488,7 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl *decl) bool is_packed = decl->is_packed; Decl **struct_members = decl->strukt.members; unsigned member_count = vec_size(struct_members); - assert(member_count > 0 && "This analysis should only be called on member_count > 0"); + ASSERT0(member_count > 0 && "This analysis should only be called on member_count > 0"); for (unsigned i = 0; i < member_count; i++) { @@ -547,7 +547,7 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl *decl) decl->has_variable_array = true; } - assert(decl_ok(decl) && "The declaration should be fine at this point."); + ASSERT0(decl_ok(decl) && "The declaration should be fine at this point."); // Grab the alignment of the member type AlignSize member_type_alignment; @@ -585,14 +585,14 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl *decl) // If the natural alignment is greater, in this case the struct is unaligned. 
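The union and struct layout hunks above juggle member offsets, alignments and padding; the core arithmetic is rounding an offset up to the next multiple of an alignment (the usual round-up that `aligned_offset` presumably performs) and recording the difference as padding. A worked sketch of that computation, independent of the real c3c types:

```c
/* Rounding offsets up to an alignment and deriving padding: the arithmetic
 * behind the struct layout hunks above. Self-contained illustration only. */
#include <stdio.h>

typedef unsigned AlignSize;

static AlignSize aligned_offset(AlignSize offset, AlignSize alignment)
{
	/* Round up to the next multiple of `alignment` (alignment > 0). */
	return (offset + alignment - 1) / alignment * alignment;
}

int main(void)
{
	/* Lay out: char (size 1, align 1), int (size 4, align 4), short (2, 2). */
	struct { AlignSize size, align; } members[] = { {1, 1}, {4, 4}, {2, 2} };
	AlignSize offset = 0, max_align = 1;
	for (unsigned i = 0; i < 3; i++)
	{
		AlignSize aligned = aligned_offset(offset, members[i].align);
		printf("member %u: padding %u, offset %u\n", i, aligned - offset, aligned);
		offset = aligned + members[i].size;
		if (members[i].align > max_align) max_align = members[i].align;
	}
	/* The struct size itself is rounded up to the maximum member alignment. */
	printf("struct size: %u\n", aligned_offset(offset, max_align));
	return 0;
}
```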
if (member_natural_alignment > member_alignment) { - assert(natural_align_offset > align_offset); + ASSERT0(natural_align_offset > align_offset); is_unaligned = true; } else { // Otherwise we have a greater offset, and in this case // we add padding for the difference. - assert(natural_align_offset < align_offset); + ASSERT0(natural_align_offset < align_offset); member->padding = align_offset - offset; } } @@ -601,7 +601,7 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl *decl) if (align_offset - offset != 0) { - assert(decl_is_struct_type(decl)); + ASSERT0(decl_is_struct_type(decl)); if (!decl->strukt.padded_decl_id) decl->strukt.padded_decl_id = declid(member); if (decl->attr_nopadding || member->attr_nopadding) { @@ -643,7 +643,7 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl *decl) } if (is_unaligned && size > offset) { - assert(!decl->strukt.padding); + ASSERT0(!decl->strukt.padding); decl->strukt.padding = (AlignSize)(size - offset); } @@ -659,7 +659,7 @@ static bool sema_analyse_struct_members(SemaContext *context, Decl *decl) if (size != offset) { - assert(decl_is_struct_type(decl)); + ASSERT0(decl_is_struct_type(decl)); if (!decl->strukt.padded_decl_id) decl->strukt.padded_decl_id = declid(decl); if (decl->attr_nopadding) { @@ -710,7 +710,7 @@ static bool sema_analyse_struct_union(SemaContext *context, Decl *decl, bool *er // Failed, so exit. if (!success) return decl_poison(decl); - assert(decl_ok(decl)); + ASSERT0(decl_ok(decl)); return true; } @@ -779,7 +779,7 @@ static inline bool sema_analyse_bitstruct_member(SemaContext *context, Decl *par if (is_consecutive) { - assert(!member->var.bit_is_expr && "Should always me inferred"); + ASSERT0(!member->var.bit_is_expr && "Should always me inferred"); if (member_type != type_bool) { SEMA_ERROR(type_info, "For bitstructs without bit ranges, the types must all be 'bool'."); @@ -1095,7 +1095,7 @@ static inline bool sema_analyse_signature(SemaContext *context, Signature *sig, bool is_macro = sig->is_macro; bool is_macro_at_name = sig->is_at_macro || sig->is_safemacro; // Check return type - assert(sig->rtype || sig->is_macro); + ASSERT0(sig->rtype || sig->is_macro); Type *rtype = NULL; if (sig->rtype) { @@ -1171,8 +1171,8 @@ static inline bool sema_analyse_signature(SemaContext *context, Signature *sig, Decl *param = params[i]; if (!param) { - assert(variadic_type == VARIADIC_RAW); - assert(i == vararg_index); + ASSERT0(variadic_type == VARIADIC_RAW); + ASSERT0(i == vararg_index); continue; } if (vararg_index < i) @@ -1193,20 +1193,20 @@ static inline bool sema_analyse_signature(SemaContext *context, Signature *sig, } if (i == 0 && param->resolve_status == RESOLVE_DONE) { - assert(param->type == type_voidptr && "Expected the first parameter of an interface method."); + ASSERT0(param->type == type_voidptr && "Expected the first parameter of an interface method."); continue; } - assert(param->resolve_status == RESOLVE_NOT_DONE && "The param shouldn't have been resolved yet."); + ASSERT0(param->resolve_status == RESOLVE_NOT_DONE && "The param shouldn't have been resolved yet."); param->resolve_status = RESOLVE_RUNNING; bool erase = false; if (!sema_analyse_attributes(context, param, param->attributes, ATTR_PARAM, &erase)) { return decl_poison(param); } - assert(!erase); + ASSERT0(!erase); param->unit = context->unit; - assert(param->decl_kind == DECL_VAR); + ASSERT0(param->decl_kind == DECL_VAR); VarDeclKind var_kind = param->var.kind; TypeInfo *type_info = type_infoptrzero(param->var.type_info); 
if (type_info) @@ -1257,7 +1257,7 @@ static inline bool sema_analyse_signature(SemaContext *context, Signature *sig, } bool erase_decl = false; if (!sema_analyse_attributes_for_var(context, param, &erase_decl)) return false; - assert(!erase_decl); + ASSERT0(!erase_decl); break; case VARDECL_PARAM_CT_TYPE: if (type_info) @@ -1340,8 +1340,8 @@ bool sema_analyse_function_signature(SemaContext *context, Decl *func_decl, Type // Remove the last empty value. if (variadic_type == VARIADIC_RAW) { - assert(params && !params[signature->vararg_index] && "The last parameter must have been a raw variadic."); - assert(signature->vararg_index == vec_size(params) - 1); + ASSERT0(params && !params[signature->vararg_index] && "The last parameter must have been a raw variadic."); + ASSERT0(signature->vararg_index == vec_size(params) - 1); vec_pop(params); } @@ -1350,14 +1350,14 @@ bool sema_analyse_function_signature(SemaContext *context, Decl *func_decl, Type for (unsigned i = 0; i < param_count; i++) { - assert(IS_RESOLVED(params[i])); - assert(params[i]->type->canonical); + ASSERT0(IS_RESOLVED(params[i])); + ASSERT0(params[i]->type->canonical); vec_add(types, params[i]->type); } Type *raw_type = sema_resolve_type_get_func(signature, abi); - assert(func_decl->type->type_kind == TYPE_FUNC_RAW); - assert(raw_type->function.prototype); + ASSERT0(func_decl->type->type_kind == TYPE_FUNC_RAW); + ASSERT0(raw_type->function.prototype); func_decl->type->function.prototype = raw_type->function.prototype; return true; } @@ -1422,16 +1422,16 @@ static inline bool sema_analyse_distinct(SemaContext *context, Decl *decl, bool static inline bool sema_analyse_enum_param(SemaContext *context, Decl *param) { - assert(param->decl_kind == DECL_VAR && param->var.kind == VARDECL_PARAM && param->var.type_info); + ASSERT0(param->decl_kind == DECL_VAR && param->var.kind == VARDECL_PARAM && param->var.type_info); if (vec_size(param->attributes)) { RETURN_SEMA_ERROR(param->attributes[0], "There are no valid attributes for associated values."); } TypeInfo *type_info = type_infoptrzero(param->var.type_info); if (!sema_resolve_type_info(context, type_info, RESOLVE_TYPE_DEFAULT)) return false; - assert(!param->var.vararg); + ASSERT0(!param->var.vararg); param->type = type_info->type; - assert(param->name); + ASSERT0(param->name); if (param->name == kw_nameof || param->name == kw_ordinal) { RETURN_SEMA_ERROR(param, "'%s' is not a valid parameter name for enums.", param->name); @@ -1442,7 +1442,7 @@ static inline bool sema_analyse_enum_param(SemaContext *context, Decl *param) RETURN_SEMA_ERROR(param, "Duplicate parameter name '%s'.", param->name); } sema_decl_stack_push(param); - assert(!param->var.init_expr); + ASSERT0(!param->var.init_expr); return sema_set_abi_alignment(context, param->type, ¶m->alignment); } @@ -1456,7 +1456,7 @@ static inline bool sema_analyse_enum(SemaContext *context, Decl *decl, bool *era if (!sema_resolve_type_info(context, decl->enums.type_info, RESOLVE_TYPE_DEFAULT)) return false; Type *type = decl->enums.type_info->type; - assert(!type_is_optional(type) && "Already stopped when parsing."); + ASSERT0(!type_is_optional(type) && "Already stopped when parsing."); Type *flat_underlying_type = type_flatten(type); // Require an integer type @@ -1521,8 +1521,8 @@ static inline bool sema_analyse_enum(SemaContext *context, Decl *decl, bool *era DEBUG_LOG("* Checking enum constant %s.", enum_value->name); enum_value->enum_constant.ordinal = i; DEBUG_LOG("* Ordinal: %d", i); - assert(enum_value->resolve_status == 
RESOLVE_NOT_DONE); - assert(enum_value->decl_kind == DECL_ENUM_CONSTANT); + ASSERT0(enum_value->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(enum_value->decl_kind == DECL_ENUM_CONSTANT); // Start evaluating the constant enum_value->resolve_status = RESOLVE_RUNNING; @@ -1594,8 +1594,8 @@ static inline bool sema_analyse_error(SemaContext *context, Decl *decl, bool *er DEBUG_LOG("* Checking error value %s.", enum_value->name); enum_value->enum_constant.ordinal = i; DEBUG_LOG("* Ordinal: %d", i); - assert(enum_value->resolve_status == RESOLVE_NOT_DONE); - assert(enum_value->decl_kind == DECL_FAULTVALUE); + ASSERT0(enum_value->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(enum_value->decl_kind == DECL_FAULTVALUE); // Start evaluating the constant enum_value->resolve_status = RESOLVE_DONE; @@ -1757,7 +1757,7 @@ bool sema_decl_if_cond(SemaContext *context, Decl *decl) { Attr *attr = attr_find_kind(decl->attributes, ATTRIBUTE_IF); decl->is_if = true; - assert(attr); + ASSERT0(attr); if (vec_size(attr->exprs) != 1) { RETURN_SEMA_ERROR(attr, "Expected an argument to '@if'."); @@ -1779,7 +1779,7 @@ bool sema_decl_if_cond(SemaContext *context, Decl *decl) INLINE SourceSpan method_find_overload_span(Decl *method) { - assert(method->resolved_attributes && method->attrs_resolved); + ASSERT0(method->resolved_attributes && method->attrs_resolved); return method->attrs_resolved->overload; } @@ -1963,7 +1963,7 @@ INLINE bool sema_analyse_operator_method(SemaContext *context, Type *parent_type static inline bool unit_add_method(SemaContext *context, Type *parent_type, Decl *method) { CompilationUnit *unit = context->unit; - assert(parent_type->canonical == parent_type); + ASSERT0(parent_type->canonical == parent_type); const char *name = method->name; // Did we already define it externally? @@ -2086,7 +2086,7 @@ static inline Decl *sema_find_interface_for_method(SemaContext *context, Canonic Decl *match = sema_interface_method_by_name(context, interface, name); if (!decl_ok(match)) return poisoned_decl; if (!match) continue; - assert(interface->resolve_status == RESOLVE_DONE); + ASSERT0(interface->resolve_status == RESOLVE_DONE); // Is there a already a match? if (first_match) @@ -2207,7 +2207,7 @@ static inline bool sema_analyse_method(SemaContext *context, Decl *decl) // Resolve the parent type. TypeInfo *parent_type = type_infoptr(decl->func_decl.type_parent); - assert(parent_type->resolve_status == RESOLVE_DONE); + ASSERT0(parent_type->resolve_status == RESOLVE_DONE); Type *par_type = parent_type->type->canonical; // Resolve declaration of parent as needed. @@ -2241,7 +2241,7 @@ static inline bool sema_analyse_method(SemaContext *context, Decl *decl) // If it's implementing a method, check it. 
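`sema_find_interface_for_method` (touched above) scans the interfaces a type implements for a method with a given name and must treat a second hit as an ambiguity rather than silently picking one. The same first-match/ambiguous pattern, reduced to a standalone sketch with placeholder data:

```c
/* First-match lookup with ambiguity detection, mirroring the shape of the
 * interface-method search above. Names and data are placeholders. */
#include <stdio.h>
#include <string.h>

typedef struct { const char *iface; const char *method; } IfaceMethod;

/* Returns the owning interface, NULL if not found, or "<ambiguous>" when
 * two interfaces declare a method with the same name. */
static const char *find_interface_for_method(const IfaceMethod *list, unsigned count, const char *name)
{
	const char *first_match = NULL;
	for (unsigned i = 0; i < count; i++)
	{
		if (strcmp(list[i].method, name) != 0) continue;
		if (first_match) return "<ambiguous>";
		first_match = list[i].iface;
	}
	return first_match;
}

int main(void)
{
	IfaceMethod methods[] = { {"IfaceA", "to_text"}, {"IfaceB", "hash"}, {"IfaceC", "to_text"} };
	printf("hash    -> %s\n", find_interface_for_method(methods, 3, "hash"));
	printf("to_text -> %s\n", find_interface_for_method(methods, 3, "to_text"));
	return 0;
}
```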
if (implemented_method) { - assert(implemented_method->resolve_status == RESOLVE_DONE); + ASSERT0(implemented_method->resolve_status == RESOLVE_DONE); if (!sema_compare_method_with_interface(context, decl, implemented_method)) return false; decl->func_decl.interface_method = declid(implemented_method); } @@ -2371,7 +2371,7 @@ static bool update_call_abi_from_string(SemaContext *context, Decl *decl, Expr * static bool sema_analyse_attribute(SemaContext *context, ResolvedAttrData *attr_data, Decl *decl, Attr *attr, AttributeDomain domain, bool *erase_decl) { AttributeType type = attr->attr_kind; - assert(type >= 0 && type < NUMBER_OF_ATTRIBUTES); + ASSERT0(type >= 0 && type < NUMBER_OF_ATTRIBUTES); // NOLINTBEGIN(*.EnumCastOutOfRange) static AttributeDomain attribute_domain[NUMBER_OF_ATTRIBUTES] = { [ATTRIBUTE_ADHOC] = USER_DEFINED_TYPES, @@ -2511,7 +2511,7 @@ static bool sema_analyse_attribute(SemaContext *context, ResolvedAttrData *attr_ break; case ATTRIBUTE_OPERATOR: { - assert(decl->decl_kind == DECL_FUNC || decl->decl_kind == DECL_MACRO); + ASSERT0(decl->decl_kind == DECL_FUNC || decl->decl_kind == DECL_MACRO); if (!expr) goto FAILED_OP_TYPE; if (decl->operator) { @@ -2780,7 +2780,7 @@ static bool sema_analyse_attribute(SemaContext *context, ResolvedAttrData *attr_ decl->is_weak = true; break; case ATTRIBUTE_NAKED: - assert(domain == ATTR_FUNC); + ASSERT0(domain == ATTR_FUNC); decl->func_decl.attr_naked = true; break; case ATTRIBUTE_OVERLAP: @@ -3198,7 +3198,7 @@ static inline Decl *sema_create_synthetic_main(SemaContext *context, Decl *decl, } } case MAIN_TYPE_NO_ARGS: - assert(!is_wmain); + ASSERT0(!is_wmain); if (is_winmain) { switch (type) @@ -3217,7 +3217,7 @@ static inline Decl *sema_create_synthetic_main(SemaContext *context, Decl *decl, default: UNREACHABLE } case MAIN_TYPE_WIN: - assert(is_winmain); + ASSERT0(is_winmain); switch (type) { case 0 : main_invoker = "@win_to_void_main"; goto NEXT; @@ -3257,7 +3257,7 @@ NEXT:; static inline bool sema_analyse_main_function(SemaContext *context, Decl *decl) { - assert(decl != context->unit->main_function); + ASSERT0(decl != context->unit->main_function); bool is_winmain = decl->func_decl.attr_winmain; bool is_win32 = compiler.platform.os == OS_TYPE_WIN32; if (decl->visibility != VISIBLE_PUBLIC) @@ -3358,7 +3358,7 @@ static inline bool sema_analyse_func(SemaContext *context, Decl *decl, bool *era } if (is_test || is_benchmark || is_init_finalizer) { - assert(!is_interface_method); + ASSERT0(!is_interface_method); if (vec_size(sig->params)) { RETURN_SEMA_ERROR(sig->params[0], "%s functions may not take any parameters.", @@ -3390,7 +3390,7 @@ static inline bool sema_analyse_func(SemaContext *context, Decl *decl, bool *era decl->type = type_new_func(decl, sig); if (!sema_analyse_function_signature(context, decl, type_infoptrzero(decl->func_decl.type_parent), sig->abi, sig)) return decl_poison(decl); TypeInfo *rtype_info = type_infoptr(sig->rtype); - assert(rtype_info); + ASSERT0(rtype_info); Type *rtype = rtype_info->type->canonical; if (sig->attrs.nodiscard) { @@ -3438,7 +3438,7 @@ static inline bool sema_analyse_func(SemaContext *context, Decl *decl, bool *era static inline bool sema_is_valid_method_param(SemaContext *context, Decl *param, Type *parent_type, bool is_dynamic) { - assert(parent_type->canonical == parent_type && "Expected already the canonical version."); + ASSERT0(parent_type->canonical == parent_type && "Expected already the canonical version."); Type *param_type = param->type; if (!param_type) goto ERROR; @@ -3478,7 
+3478,7 @@ static bool sema_analyse_macro_method(SemaContext *context, Decl *decl) { // Resolve the type of the method. TypeInfo *parent_type_info = type_infoptr(decl->func_decl.type_parent); - assert(parent_type_info->resolve_status == RESOLVE_DONE); + ASSERT0(parent_type_info->resolve_status == RESOLVE_DONE); Type *parent_type = parent_type_info->type->canonical; // Check the first argument. @@ -3508,11 +3508,11 @@ INLINE bool sema_analyse_macro_body(SemaContext *context, Decl **body_parameters unsigned body_param_count = vec_size(body_parameters); for (unsigned i = 0; i < body_param_count; i++) { - assert(body_parameters); + ASSERT0(body_parameters); Decl *param = body_parameters[i]; - assert(param); + ASSERT0(param); param->resolve_status = RESOLVE_RUNNING; - assert(param->decl_kind == DECL_VAR); + ASSERT0(param->decl_kind == DECL_VAR); TypeInfo *type_info = type_infoptrzero(param->var.type_info); VarDeclKind kind = param->var.kind; switch (kind) @@ -3575,7 +3575,7 @@ static inline bool sema_analyse_macro(SemaContext *context, Decl *decl, bool *er if (typeget(decl->func_decl.signature.rtype) == type_void) RETURN_SEMA_ERROR(decl, "'@const' macros may not return 'void', they should always return a constant value."); if (body_parameters) RETURN_SEMA_ERROR(decl, "'@const' macros cannot have body parameters."); Ast *body = astptr(decl->func_decl.body); - assert(body->ast_kind == AST_COMPOUND_STMT); + ASSERT0(body->ast_kind == AST_COMPOUND_STMT); body = astptrzero(body->compound_stmt.first_stmt); if (!body) RETURN_SEMA_ERROR(decl, "'@const' macros cannot have an empty body."); while (body) @@ -3688,7 +3688,7 @@ static bool sema_analyse_variable_type(SemaContext *context, Type *type, SourceS bool sema_analyse_var_decl_ct(SemaContext *context, Decl *decl) { Expr *init; - assert(decl->decl_kind == DECL_VAR && "Should only be called on variables."); + ASSERT0(decl->decl_kind == DECL_VAR && "Should only be called on variables."); // Grab the optional type_info. TypeInfo *type_info = vartype(decl); @@ -3787,7 +3787,7 @@ bool sema_analyse_var_decl_ct(SemaContext *context, Decl *decl) */ bool sema_analyse_var_decl(SemaContext *context, Decl *decl, bool local) { - assert(decl->decl_kind == DECL_VAR && "Unexpected declaration type"); + ASSERT0(decl->decl_kind == DECL_VAR && "Unexpected declaration type"); VarDeclKind kind = decl->var.kind; @@ -3808,7 +3808,7 @@ bool sema_analyse_var_decl(SemaContext *context, Decl *decl, bool local) TypeInfo *type_info = vartype(decl); // We expect a constant to actually be parsed correctly so that it has a value, so // this should always be true. - assert(type_info || decl->var.init_expr); + ASSERT0(type_info || decl->var.init_expr); if (is_global) { @@ -3838,7 +3838,7 @@ bool sema_analyse_var_decl(SemaContext *context, Decl *decl, bool local) if (decl->is_extern && decl->var.init_expr) { - assert(is_global); + ASSERT0(is_global); SEMA_ERROR(decl->var.init_expr, "Extern globals may not have initializers."); return decl_poison(decl); } @@ -3855,7 +3855,7 @@ bool sema_analyse_var_decl(SemaContext *context, Decl *decl, bool local) // 1a. We require an init expression. if (!init_expr) { - assert(kind == VARDECL_CONST); + ASSERT0(kind == VARDECL_CONST); SEMA_ERROR(decl, "Constants need to have an initial value."); return decl_poison(decl); } @@ -3864,7 +3864,7 @@ bool sema_analyse_var_decl(SemaContext *context, Decl *decl, bool local) SEMA_ERROR(decl, "Defining a variable using 'var %s = ...' 
is only allowed inside a macro.", decl->name); return decl_poison(decl); } - assert(!decl->var.no_init); + ASSERT0(!decl->var.no_init); if (!type_info) { if (!sema_analyse_expr(context, init_expr)) return decl_poison(decl); @@ -4011,7 +4011,7 @@ static CompilationUnit *unit_copy(Module *module, CompilationUnit *unit) copy->global_decls = copy_decl_list_single_for_unit(unit->global_decls); copy->global_cond_decls = copy_decl_list_single_for_unit(unit->global_cond_decls); copy->module = module; - assert(!unit->functions && !unit->macro_methods && !unit->methods && !unit->enums && !unit->ct_includes && !unit->types); + ASSERT0(!unit->functions && !unit->macro_methods && !unit->methods && !unit->enums && !unit->ct_includes && !unit->types); return copy; } @@ -4049,7 +4049,7 @@ static Module *module_instantiate_generic(SemaContext *context, Module *module, if (!sema_resolve_type_info(context, type_info, RESOLVE_TYPE_DEFAULT)) return false; Decl *decl = decl_new_with_type(param_name, params[i]->span, DECL_TYPEDEF); decl->resolve_status = RESOLVE_DONE; - assert(type_info->resolve_status == RESOLVE_DONE); + ASSERT0(type_info->resolve_status == RESOLVE_DONE); decl->typedef_decl.type_info = type_info; decl->type->name = decl->name; decl->type->canonical = type_info->type->canonical; @@ -4123,7 +4123,7 @@ static bool sema_generate_parameterized_name_to_scratch(SemaContext *context, Mo SEMA_ERROR(param, "Only integer, bool, fault and enum values may be generic arguments."); return poisoned_decl; } - assert(expr_is_const(param)); + ASSERT0(expr_is_const(param)); } } @@ -4210,15 +4210,15 @@ static bool sema_generate_parameterized_name_to_scratch(SemaContext *context, Mo static bool sema_analyse_generic_module_contracts(SemaContext *c, Module *module, SourceSpan error_span) { - assert(module->contracts); + ASSERT0(module->contracts); AstId contract = module->contracts; while (contract) { Ast *ast = astptr(contract); contract = ast->next; - assert(ast->ast_kind == AST_CONTRACT); + ASSERT0(ast->ast_kind == AST_CONTRACT); SemaContext temp_context; - assert(ast->contract_stmt.kind == CONTRACT_REQUIRE); + ASSERT0(ast->contract_stmt.kind == CONTRACT_REQUIRE); SemaContext *new_context = context_transform_for_eval(c, &temp_context, module->units[0]); FOREACH(Expr *, expr, ast->contract_stmt.contract.decl_exprs->expression_list) { @@ -4257,13 +4257,13 @@ Decl *sema_analyse_parameterized_identifier(SemaContext *c, Path *decl_path, con if (!unit_resolve_parameterized_symbol(c, &name_resolve)) return poisoned_decl; Decl *alias = name_resolve.found; - assert(alias); + ASSERT0(alias); Module *module = alias->unit->module; unsigned parameter_count = vec_size(module->parameters); - assert(parameter_count > 0); + ASSERT0(parameter_count > 0); if (parameter_count != vec_size(params)) { - assert(vec_size(params)); + ASSERT0(vec_size(params)); sema_error_at(c, extend_span_with_token(params[0]->span, vectail(params)->span), "The generic module expected %d arguments, but you supplied %d, did you make a mistake?", parameter_count, @@ -4489,7 +4489,7 @@ bool sema_analyse_decl(SemaContext *context, Decl *decl) goto FAILED; } decl->resolve_status = RESOLVE_RUNNING; - assert(decl->unit); + ASSERT0(decl->unit); bool erase_decl = false; switch (decl->decl_kind) { diff --git a/src/compiler/sema_expr.c b/src/compiler/sema_expr.c index de9b97830..33f3e1b0d 100644 --- a/src/compiler/sema_expr.c +++ b/src/compiler/sema_expr.c @@ -1064,7 +1064,7 @@ static inline bool sema_expr_analyse_identifier(SemaContext *context, Type *to, static 
inline bool sema_expr_analyse_ct_identifier(SemaContext *context, Expr *expr, CheckType check) { - assert(expr && expr->ct_ident_expr.identifier); + ASSERT0(expr && expr->ct_ident_expr.identifier); DEBUG_LOG("Resolving identifier '%s'", expr->ct_ident_expr.identifier); Decl *decl = sema_resolve_symbol(context, expr->ct_ident_expr.identifier, NULL, expr->span); @@ -1089,7 +1089,7 @@ static inline bool sema_expr_analyse_ct_identifier(SemaContext *context, Expr *e static inline bool sema_expr_analyse_hash_identifier(SemaContext *context, Type *infer_type, Expr *expr) { - assert(expr && expr->hash_ident_expr.identifier); + ASSERT0(expr && expr->hash_ident_expr.identifier); DEBUG_LOG("Resolving identifier '%s'", expr->hash_ident_expr.identifier); Decl *decl = sema_resolve_symbol(context, expr->hash_ident_expr.identifier, NULL, expr->span); @@ -1345,7 +1345,7 @@ static bool sema_analyse_parameter(SemaContext *context, Expr *arg, Decl *param, } if (!param->alignment) { - assert(macro && "Only in the macro case should we need to insert the alignment."); + ASSERT0(macro && "Only in the macro case should we need to insert the alignment."); if (!sema_set_alloca_alignment(context, arg->type, ¶m->alignment)) return false; } break; @@ -1356,7 +1356,7 @@ static bool sema_analyse_parameter(SemaContext *context, Expr *arg, Decl *param, break; case VARDECL_PARAM_CT: // $foo - assert(macro); + ASSERT0(macro); if (!sema_analyse_expr_rhs(context, type, arg, true, no_match_ref, false)) { SEMA_NOTE(definition, "The definition is here."); @@ -1546,7 +1546,7 @@ INLINE bool sema_call_evaluate_arguments(SemaContext *context, CalledDecl *calle unsigned num_args = vec_size(args); Decl **params = callee->params; - assert(func_param_count < MAX_PARAMS); + ASSERT0(func_param_count < MAX_PARAMS); Expr **actual_args = VECNEW(Expr*, func_param_count); for (unsigned i = 0; i < func_param_count; i++) { @@ -1564,7 +1564,7 @@ INLINE bool sema_call_evaluate_arguments(SemaContext *context, CalledDecl *calle { Expr *arg = args[i]; if (i > 0) last = args[i - 1]; - assert(expr_ok(arg)); + ASSERT0(expr_ok(arg)); if (arg->expr_kind == EXPR_VASPLAT) { Expr **new_args = sema_vasplat_insert(context, args, arg, i); @@ -1956,11 +1956,11 @@ static inline Type *context_unify_returns(SemaContext *context) // 5. No match -> error. if (!max) { - assert(return_stmt); + ASSERT0(return_stmt); SEMA_ERROR(return_stmt, "Cannot find a common parent type of %s and %s", type_quoted_error_string(rtype), type_quoted_error_string(common_type)); Ast *prev = context->returns[i - 1]; - assert(prev); + ASSERT0(prev); SEMA_NOTE(prev, "The previous return was here."); return NULL; } @@ -1970,12 +1970,12 @@ static inline Type *context_unify_returns(SemaContext *context) all_returns_need_casts = true; } - assert(common_type); + ASSERT0(common_type); // 7. Insert casts. 
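`context_unify_returns` (above) walks every `return` collected for the analysed body, folds their types into a single common parent type, reports the two conflicting returns when no such type exists, and finally casts each return to the unified type. A reduced sketch of that fold, with a toy widening lattice standing in for the real common-type search:

```c
/* Folding many return types into one common parent type, mirroring the
 * structure of context_unify_returns above. The tiny "lattice" here
 * (int <: long <: double) is purely illustrative. */
#include <stdio.h>

typedef enum { T_NONE = 0, T_INT = 1, T_LONG = 2, T_DOUBLE = 3 } Ty;

/* Returns the common parent of two types, or T_NONE if there is none. */
static Ty common_parent(Ty a, Ty b)
{
	if (a == T_NONE || b == T_NONE) return T_NONE;
	return a > b ? a : b; /* in this toy lattice, the wider type wins */
}

int main(void)
{
	Ty returns[] = { T_INT, T_LONG, T_INT };
	Ty common = returns[0];
	int needs_casts = 0;
	for (unsigned i = 1; i < 3; i++)
	{
		Ty max = common_parent(common, returns[i]);
		if (max == T_NONE)
		{
			fprintf(stderr, "Cannot find a common parent type\n");
			return 1;
		}
		if (max != common || max != returns[i]) needs_casts = 1;
		common = max;
	}
	/* The real code now walks the returns again and inserts casts to `common`. */
	printf("unified type: %d, casts needed: %d\n", common, needs_casts);
	return 0;
}
```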
if (all_returns_need_casts) { - assert(common_type != type_wildcard); + ASSERT0(common_type != type_wildcard); FOREACH(Ast *, return_stmt, context->returns) { if (!return_stmt) continue; @@ -2844,7 +2844,7 @@ static Expr *sema_expr_find_index_type_or_overload_for_subscript(SemaContext *co if (overload) { *overload_ptr = overload; - assert(vec_size(overload->func_decl.signature.params) == 3); + ASSERT0(vec_size(overload->func_decl.signature.params) == 3); *index_type_ptr = overload->func_decl.signature.params[2]->type; return current_expr; } @@ -2855,7 +2855,7 @@ static Expr *sema_expr_find_index_type_or_overload_for_subscript(SemaContext *co if (overload) { *overload_ptr = overload; - assert(overload->func_decl.signature.rtype); + ASSERT0(overload->func_decl.signature.rtype); *index_type_ptr = type_infoptr(overload->func_decl.signature.rtype)->type; return current_expr; } @@ -2878,7 +2878,7 @@ static Expr *sema_expr_find_index_type_or_overload_for_subscript(SemaContext *co static inline bool sema_expr_analyse_subscript(SemaContext *context, Expr *expr, CheckType check, bool check_valid) { - assert(expr->expr_kind == EXPR_SUBSCRIPT || expr->expr_kind == EXPR_SUBSCRIPT_ADDR); + ASSERT0(expr->expr_kind == EXPR_SUBSCRIPT || expr->expr_kind == EXPR_SUBSCRIPT_ADDR); bool is_eval_ref = expr->expr_kind == EXPR_SUBSCRIPT_ADDR; // Evaluate the expression to index. @@ -2925,7 +2925,7 @@ static inline bool sema_expr_analyse_subscript(SemaContext *context, Expr *expr, if (!overload) current_type = type_flatten(current_expr->type); } - assert(current_type == current_type->canonical); + ASSERT0(current_type == current_type->canonical); int64_t index_value = -1; bool start_from_end = expr->subscript_expr.index.start_from_end; if (start_from_end && (current_type->type_kind == TYPE_POINTER || current_type->type_kind == TYPE_FLEXIBLE_ARRAY)) @@ -3145,7 +3145,7 @@ typedef enum RangeEnv INLINE bool sema_expre_analyse_range_internal(SemaContext *context, Range *range, Type *indexed_type, ArrayIndex len, RangeEnv env) { Expr *start = exprptr(range->start); - assert(start); + ASSERT0(start); Expr *end = exprptrzero(range->end); if (!sema_analyse_expr(context, start)) return false; @@ -3332,7 +3332,7 @@ static inline bool sema_expr_analyse_range(SemaContext *context, Range *range, T static inline void sema_slice_initializer(SemaContext *context, Expr *expr, Expr *subscripted, Range *range) { ConstInitializer *initializer = subscripted->const_expr.initializer; - assert(type_is_arraylike(initializer->type)); + ASSERT0(type_is_arraylike(initializer->type)); Type *new_type = type_get_slice(type_get_indexed_type(subscripted->type)); // Turn zero length into an untyped list. 
if (range->len_index == 0) @@ -3437,7 +3437,7 @@ static inline bool sema_expr_analyse_slice(SemaContext *context, Expr *expr, Che if (type->type_kind != TYPE_SLICE) { Type *index = type_get_indexed_type(type); - assert(index); + ASSERT0(index); original_type = type_get_slice(index); } subscripted->type = original_type; @@ -3445,7 +3445,7 @@ static inline bool sema_expr_analyse_slice(SemaContext *context, Expr *expr, Che return true; } case CONST_UNTYPED_LIST: - assert(!type_is_arraylike(subscripted->type)); + ASSERT0(!type_is_arraylike(subscripted->type)); vec_erase_front(subscripted->const_expr.untyped_list, range->start_index); vec_resize(subscripted->const_expr.untyped_list, range->len_index); expr_replace(expr, subscripted); @@ -3456,7 +3456,7 @@ static inline bool sema_expr_analyse_slice(SemaContext *context, Expr *expr, Che case CONST_SLICE: if (!subscripted->const_expr.slice_init) { - assert(range->len_index == 0); + ASSERT0(range->len_index == 0); expr_replace(expr, subscripted); return true; } @@ -5011,7 +5011,7 @@ static inline bool sema_expr_analyse_access(SemaContext *context, Expr *expr, bo } if (ambiguous) { - assert(member); + ASSERT0(member); RETURN_SEMA_ERROR(expr, "'%s' is an ambiguous name and so cannot be resolved, it may refer to method defined in '%s' or one in '%s'", kw, member->unit->module->name->module, ambiguous->unit->module->name->module); } @@ -5087,7 +5087,7 @@ static inline Expr **sema_prepare_splat_insert(Expr **exprs, unsigned added, uns return exprs; } unsigned size = vec_size(exprs); - assert(size); + ASSERT0(size); for (unsigned i = 1; i < added; i++) { vec_add(exprs, NULL); @@ -5213,10 +5213,10 @@ static Expr **sema_vasplat_insert(SemaContext *context, Expr **init_expressions, return init_expressions; } -Expr **sema_expand_vasplat_exprs(SemaContext *c, Expr **exprs) +Expr **sema_expand_vasplat_exprs(SemaContext *context, Expr **exprs) { - if (!c || !c->current_macro) return exprs; - + if (!context) return exprs; + bool in_macro = context->current_macro; unsigned count = vec_size(exprs); bool expand; do @@ -5224,15 +5224,54 @@ Expr **sema_expand_vasplat_exprs(SemaContext *c, Expr **exprs) expand = false; for (unsigned i = 0; i < count; i++) { - if (exprs[i]->expr_kind == EXPR_VASPLAT) + Expr *arg = exprs[i]; + ExprKind kind = arg->expr_kind; + if (in_macro && kind == EXPR_VASPLAT) { - exprs = sema_vasplat_insert(c, exprs, exprs[i], i); + exprs = sema_vasplat_insert(context, exprs, arg, i); // If we have null back it failed. 
if (!exprs) return NULL; count = vec_size(exprs); expand = true; break; } + if (kind == EXPR_SPLAT) + { + Expr *inner = arg->inner_expr; + if (!sema_analyse_expr(context, inner)) return NULL; + Type *flat = type_flatten(inner->type); + switch (flat->type_kind) + { + case TYPE_VECTOR: + case TYPE_ARRAY: + case TYPE_SLICE: + case TYPE_UNTYPED_LIST: + // These may be splatted + break; + default: + SEMA_ERROR(arg, "An argument of type %s cannot be splatted.", + type_quoted_error_string(inner->type)); + return NULL; + } + ArrayIndex len = sema_len_from_expr(inner); + if (len == -1) + { + SEMA_ERROR(arg, + "Splat may not be used when the length is not known, but if you slice it to a constant length it will work (e.g. '...val[:2]')."); + return NULL; + } + if (len == 0 && !expr_is_const(arg)) + { + SEMA_ERROR(arg, "A non-constant zero size splat is not allowed."); + return NULL; + } + Expr **new_args = sema_splat_arraylike_insert(context, exprs, inner, len, i); + if (!new_args) return NULL; + exprs = new_args; + count = vec_size(exprs); + expand = true; + break; + } } } while (expand); return exprs; @@ -8194,14 +8233,14 @@ static inline Type *sema_evaluate_type_copy(SemaContext *context, TypeInfo *type INLINE bool lambda_parameter_match(Decl **ct_lambda_params, Decl *candidate) { unsigned param_count = vec_size(ct_lambda_params); - assert(vec_size(candidate->func_decl.lambda_ct_parameters) == param_count); + ASSERT0(vec_size(candidate->func_decl.lambda_ct_parameters) == param_count); if (!param_count) return true; FOREACH_IDX(i, Decl *, param, candidate->func_decl.lambda_ct_parameters) { Decl *ct_param = ct_lambda_params[i]; if (!param->var.is_read) continue; - assert(ct_param->resolve_status == RESOLVE_DONE || param->resolve_status == RESOLVE_DONE); - assert(ct_param->var.kind == param->var.kind); + ASSERT0(ct_param->resolve_status == RESOLVE_DONE || param->resolve_status == RESOLVE_DONE); + ASSERT0(ct_param->var.kind == param->var.kind); switch (ct_param->var.kind) { case VARDECL_LOCAL_CT_TYPE: @@ -8253,7 +8292,7 @@ static inline Decl *sema_find_cached_lambda(SemaContext *context, Type *func_typ if (!info) return NULL; Type *type = sema_evaluate_type_copy(context, info); if (!type) return NULL; - assert(i < 198); + ASSERT0(i < 198); types[i + 1] = type; } @@ -8674,7 +8713,6 @@ static inline bool sema_expr_analyse_ct_defined(SemaContext *context, Expr *expr case EXPR_SLICE: case EXPR_SLICE_ASSIGN: case EXPR_SLICE_COPY: - case EXPR_SPLAT: case EXPR_SWIZZLE: case EXPR_SUBSCRIPT_ADDR: case EXPR_SUBSCRIPT_ASSIGN: @@ -8706,6 +8744,7 @@ static inline bool sema_expr_analyse_ct_defined(SemaContext *context, Expr *expr case EXPR_TYPEID_INFO: case EXPR_TYPECALL: case EXPR_MEMBER_GET: + case EXPR_SPLAT: if (!sema_analyse_expr(active_context, main_expr)) return false; break; } @@ -9085,13 +9124,14 @@ static inline bool sema_analyse_expr_dispatch(SemaContext *context, Expr *expr, case EXPR_NAMED_ARGUMENT: case EXPR_NOP: case EXPR_OPERATOR_CHARS: - case EXPR_SPLAT: case EXPR_SWIZZLE: case EXPR_TEST_HOOK: case EXPR_TRY_UNWRAP: case EXPR_TRY_UNWRAP_CHAIN: case EXPR_TYPEID_INFO: UNREACHABLE + case EXPR_SPLAT: + RETURN_SEMA_ERROR(expr, "Splat ('...') may only appear in initializers and calls."); case EXPR_TYPECALL: RETURN_SEMA_ERROR(expr, "Expected '()' after this."); case EXPR_OTHER_CONTEXT: @@ -9350,7 +9390,7 @@ static inline bool sema_cast_rvalue(SemaContext *context, Expr *expr) sema_expr_flatten_const_ident(expr->access_expr.parent); return true; case EXPR_TYPEINFO: - RETURN_SEMA_ERROR(expr, "A
type must be followed by either (...) or '.'."); + RETURN_SEMA_ERROR(expr, "A type must be followed by either (...) or '.' unless passed as a macro type argument or assigned to a compile time type variable."); case EXPR_CT_IDENT: if (!sema_cast_ct_ident_rvalue(context, expr)) return false; break; @@ -9404,7 +9444,7 @@ bool sema_analyse_ct_expr(SemaContext *context, Expr *expr) bool sema_analyse_expr_value(SemaContext *context, Expr *expr) { - assert(expr); + ASSERT0(expr); switch (expr->resolve_status) { case RESOLVE_NOT_DONE: @@ -9424,7 +9464,7 @@ bool sema_analyse_expr_value(SemaContext *context, Expr *expr) static inline bool sema_analyse_expr_check(SemaContext *context, Expr *expr, CheckType check) { - assert(expr); + ASSERT0(expr); switch (expr->resolve_status) { case RESOLVE_NOT_DONE: @@ -9449,7 +9489,7 @@ bool sema_analyse_expr_address(SemaContext *context, Expr *expr) bool sema_analyse_expr_lvalue(SemaContext *context, Expr *expr) { - assert(expr); + ASSERT0(expr); return sema_analyse_expr_check(context, expr, CHECK_LVALUE); } diff --git a/src/compiler/sema_initializers.c b/src/compiler/sema_initializers.c index d26ff2712..c998dcd69 100644 --- a/src/compiler/sema_initializers.c +++ b/src/compiler/sema_initializers.c @@ -50,7 +50,7 @@ bool const_init_local_init_may_be_global_inner(ConstInitializer *init, bool top) return top; case CONST_INIT_STRUCT: list = init->init_struct; - assert(vec_size(init->type->decl->strukt.members) == vec_size(list)); + ASSERT0(vec_size(init->type->decl->strukt.members) == vec_size(list)); len = vec_size(list); break; case CONST_INIT_UNION: @@ -188,7 +188,7 @@ static inline void sema_not_enough_elements_error(SemaContext *context, Expr *in */ static inline bool sema_expr_analyse_struct_plain_initializer(SemaContext *context, Decl *assigned, Expr *initializer) { - assert(assigned->resolve_status == RESOLVE_DONE); + ASSERT0(assigned->resolve_status == RESOLVE_DONE); Expr **elements = initializer->initializer_list; Decl **members = assigned->strukt.members; ArrayIndex size = (ArrayIndex)vec_size(elements); @@ -197,7 +197,7 @@ static inline bool sema_expr_analyse_struct_plain_initializer(SemaContext *conte // 1. For struct number of members must be the same as the size of the struct. // Since we already handled the case with an empty initializer before going here // zero entries must be an error. - assert(size > 0 && "We should already have handled the size == 0 case."); + ASSERT0(size > 0 && "We should already have handled the size == 0 case."); // 2. We don't support this actually, but we used to. Maybe we will in the future. if (elements_needed == 0) @@ -214,8 +214,7 @@ static inline bool sema_expr_analyse_struct_plain_initializer(SemaContext *conte { if (vec_size(assigned->strukt.members) > 1 && vec_size(elements) > 1) { - SEMA_ERROR(elements[0], "Bitstructs with @overlap must use designated initialization."); - return false; + RETURN_SEMA_ERROR(elements[0], "Bitstructs with @overlap must use designated initialization."); } } @@ -228,9 +227,8 @@ static inline bool sema_expr_analyse_struct_plain_initializer(SemaContext *conte // user pinpoint where they put the double elements. if (i >= elements_needed) { - assert(i < size); - SEMA_ERROR(elements[i], "Too many elements in initializer, expected only %d.", elements_needed); - return false; + ASSERT0(i < size); + RETURN_SEMA_ERROR(elements[i], "Too many elements in initializer, expected only %d.", elements_needed); } // 5. 
We might have anonymous members Decl *member = members[i]; @@ -265,7 +263,7 @@ static inline bool sema_expr_analyse_struct_plain_initializer(SemaContext *conte size -= reduce_by; elements_needed -= reduce_by; max_loop = size > elements_needed ? size : elements_needed; - assert(size <= vec_size(initializer->initializer_list)); + ASSERT0(size <= vec_size(initializer->initializer_list)); vec_resize(initializer->initializer_list, (unsigned)size); elements = initializer->initializer_list; elements[i] = new_initializer; @@ -284,16 +282,16 @@ static inline bool sema_expr_analyse_struct_plain_initializer(SemaContext *conte } optional = optional || IS_OPTIONAL(element); } - assert(initializer->type); + ASSERT0(initializer->type); if (optional) initializer->type = type_get_optional(initializer->type); // 6. There's the case of too few values as well. Mark the last field as wrong. - assert(elements_needed <= size); + ASSERT0(elements_needed <= size); initializer->resolve_status = RESOLVE_DONE; if (expr_is_runtime_const(initializer)) { bool is_union = type_flatten(initializer->type)->type_kind == TYPE_UNION; - assert(!is_union || vec_size(elements) == 1); + ASSERT0(!is_union || vec_size(elements) == 1); ConstInitializer *init; if (is_union) { @@ -331,17 +329,17 @@ static inline bool sema_expr_analyse_array_plain_initializer(SemaContext *contex // We have the case where "Foo = int[*]" if (inferred_len && !type_len_is_inferred(assigned)) { - assert(assigned->type_kind == TYPE_TYPEDEF); - assert(assigned->decl->decl_kind == DECL_TYPEDEF); + ASSERT0(assigned->type_kind == TYPE_TYPEDEF); + ASSERT0(assigned->decl->decl_kind == DECL_TYPEDEF); while (assigned->type_kind == TYPE_TYPEDEF) assigned = assigned->decl->type; - assert(type_len_is_inferred(assigned)); + ASSERT0(type_len_is_inferred(assigned)); } // Prefer the typedef index: define Bar = int; Bar[1] => Bar and not int Type *inner_type = type_get_indexed_type(assigned); - assert(inner_type); + ASSERT0(inner_type); unsigned count = vec_size(elements); unsigned expected_members = flattened->array.len; - assert(count > 0 && "We should already have handled the size == 0 case."); + ASSERT0(count > 0 && "We should already have handled the size == 0 case."); if (expected_members == 0 && !inferred_len) { @@ -443,7 +441,7 @@ static inline bool sema_expr_analyse_array_plain_initializer(SemaContext *contex initializer->type = assigned; } - assert(initializer->type); + ASSERT0(initializer->type); if (optional) initializer->type = type_get_optional(initializer->type); if (!inferred_len && expected_members > count) @@ -514,7 +512,7 @@ static bool sema_expr_analyse_designated_initializer(SemaContext *context, Type bool is_bitmember = member && member->decl_kind == DECL_VAR && member->var.kind == VARDECL_BITMEMBER; Expr *value = expr->designator_expr.value; if (!value && is_bitmember && member->var.start_bit == member->var.end_bit && type_flatten(result) == type_bool) { - assert(is_bitstruct); + ASSERT0(is_bitstruct); value = expr_new_const_bool(INVALID_SPAN, type_bool, true); expr->designator_expr.value = value; bitmember_count_without_value += 1; @@ -572,10 +570,10 @@ static inline bool sema_expr_analyse_initializer(SemaContext *context, Type *ass if (expr->expr_kind == EXPR_CONST) { - assert(expr->const_expr.const_kind == CONST_INITIALIZER); + ASSERT0(expr->const_expr.const_kind == CONST_INITIALIZER); return cast_implicit(context, expr, assigned_type, false); } - assert(expr->expr_kind == EXPR_INITIALIZER_LIST); + ASSERT0(expr->expr_kind == EXPR_INITIALIZER_LIST); 
// 2. Grab the expressions inside. Expr **init_expressions = expr->initializer_list; @@ -631,13 +629,13 @@ static void sema_create_const_initializer_from_designated_init(ConstInitializer { // Flatten the type since the external type might be typedef or a distinct type. const_init_rewrite_to_zero(const_init, type_flatten(initializer->type)); - assert(type_flatten(initializer->type)->type_kind != TYPE_SLICE); + ASSERT0(type_flatten(initializer->type)->type_kind != TYPE_SLICE); // Loop through the initializers. FOREACH(Expr *, expr, initializer->initializer_list) { DesignatorElement **path = expr->designator_expr.path; Expr *value = expr->designator_expr.value; - assert(value); + ASSERT0(value); sema_update_const_initializer_with_designator(const_init, path, path + vec_size(path), value); } } @@ -658,7 +656,7 @@ void sema_invert_bitstruct_const_initializer(ConstInitializer *initializer) } } - assert(vec_size(initializer->init_struct) == len); + ASSERT0(vec_size(initializer->init_struct) == len); FOREACH_IDX(i, ConstInitializer *, init, initializer->init_struct) { Type *type = init->type; @@ -720,7 +718,7 @@ ConstInitializer *sema_merge_bitstruct_const_initializers(ConstInitializer *lhs, UNREACHABLE } } - assert(lhs->kind == CONST_INIT_STRUCT && rhs->kind == CONST_INIT_STRUCT); + ASSERT0(lhs->kind == CONST_INIT_STRUCT && rhs->kind == CONST_INIT_STRUCT); ConstInitializer **lhs_inits = lhs->init_struct; ConstInitializer **rhs_inits = rhs->init_struct; Decl **members = lhs->type->decl->strukt.members; @@ -762,7 +760,7 @@ ConstInitializer *sema_merge_bitstruct_const_initializers(ConstInitializer *lhs, } continue; } - assert(type_is_integer(type_flatten(init_lhs->type))); + ASSERT0(type_is_integer(type_flatten(init_lhs->type))); switch (op) { case BINARYOP_BIT_AND: @@ -784,7 +782,7 @@ ConstInitializer *sema_merge_bitstruct_const_initializers(ConstInitializer *lhs, bool sema_expr_analyse_initializer_list(SemaContext *context, Type *to, Expr *expr) { if (!to) to = type_untypedlist; - assert(to); + ASSERT0(to); Type *flattened = type_flatten(to); bool is_zero_init = (expr->expr_kind == EXPR_INITIALIZER_LIST && !vec_size(expr->initializer_list)) || sema_initializer_list_is_empty(expr); @@ -872,14 +870,14 @@ void const_init_rewrite_to_value(ConstInitializer *const_init, Expr *value) { *const_init = *value->const_expr.initializer; value->const_expr.initializer = const_init; - assert(type_flatten(value->type)->type_kind != TYPE_SLICE); + ASSERT0(type_flatten(value->type)->type_kind != TYPE_SLICE); return; } if (value->expr_kind == EXPR_IDENTIFIER) { Decl *ident = decl_flatten(value->identifier_expr.decl); - assert(ident->decl_kind == DECL_VAR); - assert(ident->var.kind == VARDECL_CONST); + ASSERT0(ident->decl_kind == DECL_VAR); + ASSERT0(ident->var.kind == VARDECL_CONST); const_init_rewrite_to_value(const_init, expr_copy(ident->var.init_expr)); return; } @@ -905,7 +903,7 @@ static inline void sema_update_const_initializer_with_designator_struct(ConstIni { // Get the current path element that we're processing DesignatorElement *element = curr[0]; - assert(element->kind == DESIGNATOR_FIELD); + ASSERT0(element->kind == DESIGNATOR_FIELD); DesignatorElement **next_element = curr + 1; bool is_last_path_element = next_element == end; @@ -913,7 +911,7 @@ static inline void sema_update_const_initializer_with_designator_struct(ConstIni if (is_last_path_element && sema_initializer_list_is_empty(value)) { const_init->kind = CONST_INIT_ZERO; - assert(type_flatten(value->type)->type_kind != TYPE_SLICE); + 
ASSERT0(type_flatten(value->type)->type_kind != TYPE_SLICE); return; } Decl **elements = const_init->type->decl->strukt.members; @@ -933,7 +931,7 @@ static inline void sema_update_const_initializer_with_designator_struct(ConstIni } // We should always have expanded the struct at this point. - assert(const_init->kind == CONST_INIT_STRUCT); + ASSERT0(const_init->kind == CONST_INIT_STRUCT); // Find the ConstInitializer to change ConstInitializer *sub_element = const_init->init_struct[element->index]; // NOLINT @@ -963,7 +961,7 @@ static inline void sema_update_const_initializer_with_designator_union(ConstInit Expr *value) { DesignatorElement *element = curr[0]; - assert(element->kind == DESIGNATOR_FIELD); + ASSERT0(element->kind == DESIGNATOR_FIELD); ConstInitializer *sub_element = const_init->init_union.element; // If it's an empty initializer, just clear everything back to CONST_INIT_ZERO @@ -1020,7 +1018,7 @@ static inline void sema_update_const_initializer_with_designator_array(ConstInit DesignatorElement *element = curr[0]; ArrayIndex low_index = element->index; ArrayIndex high_index = element->kind == DESIGNATOR_RANGE ? element->index_end : element->index; - assert(element->kind == DESIGNATOR_ARRAY || element->kind == DESIGNATOR_RANGE); + ASSERT0(element->kind == DESIGNATOR_ARRAY || element->kind == DESIGNATOR_RANGE); // Expand zero into array. if (const_init->kind == CONST_INIT_ZERO) @@ -1041,7 +1039,7 @@ static inline void sema_update_const_initializer_with_designator_array(ConstInit for (ArrayIndex index = low_index; index <= high_index; index++) { - assert(insert_index >= array_count || array_elements); + ASSERT0(insert_index >= array_count || array_elements); // Walk to the insert point or until we reached the end of the array. while (insert_index < array_count && array_elements[insert_index]->init_array_value.index < index) { @@ -1066,7 +1064,7 @@ static inline void sema_update_const_initializer_with_designator_array(ConstInit // need to do an insert. if (initializer->init_array_value.index != insert_index) { - assert(initializer->init_array_value.index > insert_index); + ASSERT0(initializer->init_array_value.index > insert_index); // First we add a null at the end. vec_add(array_elements, NULL); @@ -1211,7 +1209,7 @@ static Type *sema_find_type_of_element(SemaContext *context, Type *type, Designa } return base; } - assert(element->kind == DESIGNATOR_FIELD); + ASSERT0(element->kind == DESIGNATOR_FIELD); if (!type_is_union_or_strukt(type_flattened) && type_flattened->type_kind != TYPE_BITSTRUCT) { return NULL; diff --git a/src/compiler/sema_internal.h b/src/compiler/sema_internal.h index e29830df1..cdbc331f7 100644 --- a/src/compiler/sema_internal.h +++ b/src/compiler/sema_internal.h @@ -24,15 +24,15 @@ #define ASSERT_SPANF(node__, check__, format__, ...) do { } while(0) #define ASSERT_SPAN(node__, check__) do { } while(0) #else -#define ASSERT_SPANF(node__, check__, format__, ...) do { if (!(check__)) { assert_print_line((node__)->span); eprintf(format__, __VA_ARGS__); assert(check__); } } while(0) -#define ASSERT_SPAN(node__, check__) do { if (!(check__)) { assert_print_line((node__)->span); assert(check__); } } while(0) +#define ASSERT_SPANF(node__, check__, format__, ...) 
do { if (!(check__)) { assert_print_line((node__)->span); eprintf(format__, __VA_ARGS__); ASSERT0(check__); } } while(0) +#define ASSERT_SPAN(node__, check__) do { if (!(check__)) { assert_print_line((node__)->span); ASSERT0(check__); } } while(0) #endif #define SCOPE_OUTER_START do { DynamicScope stored_scope = context->active_scope; context_change_scope_with_flags(context, SCOPE_NONE); -#define SCOPE_OUTER_END assert(context->active_scope.defer_last == context->active_scope.defer_start); context->active_scope = stored_scope; } while(0) +#define SCOPE_OUTER_END ASSERT0(context->active_scope.defer_last == context->active_scope.defer_start); context->active_scope = stored_scope; } while(0) #define SCOPE_START SCOPE_START_WITH_FLAGS(SCOPE_NONE) #define SCOPE_START_WITH_FLAGS(flags) do { DynamicScope old_scope = context->active_scope; context_change_scope_with_flags(context, flags); #define SCOPE_START_WITH_LABEL(label) do { DynamicScope old_scope = context->active_scope; context_change_scope_for_label(context, label); -#define SCOPE_END assert(context->active_scope.defer_last == context->active_scope.defer_start); context->active_scope = old_scope; } while(0) +#define SCOPE_END ASSERT0(context->active_scope.defer_last == context->active_scope.defer_start); context->active_scope = old_scope; } while(0) #define SCOPE_POP_ERROR() ((bool)(context->active_scope = old_scope, false)) #define SCOPE_ERROR_END_OUTER() do { context->active_scope = stored_scope; } while(0) #define PUSH_X(ast, X) AstId _old_##X##_defer = context->X##_defer; Ast *_old_##X = context->X##_target; context->X##_target = ast; context->X##_defer = context->active_scope.defer_last @@ -167,7 +167,7 @@ INLINE Attr* attr_find_kind(Attr **attrs, AttributeType attr_type) INLINE void sema_display_deprecated_warning_on_use(SemaContext *context, Decl *decl, SourceSpan span) { - assert(decl->resolve_status == RESOLVE_DONE); + ASSERT0(decl->resolve_status == RESOLVE_DONE); if (!decl->resolved_attributes || !decl->attrs_resolved || !decl->attrs_resolved->deprecated) return; const char *msg = decl->attrs_resolved->deprecated; diff --git a/src/compiler/sema_name_resolution.c b/src/compiler/sema_name_resolution.c index 46f77b39a..a5474d742 100644 --- a/src/compiler/sema_name_resolution.c +++ b/src/compiler/sema_name_resolution.c @@ -71,7 +71,7 @@ static bool add_interface_to_decl_stack(SemaContext *context, Decl *decl) if (!sema_analyse_decl(context, decl)) return false; FOREACH(TypeInfo *, parent_interface, decl->interfaces) { - assert(parent_interface->resolve_status == RESOLVE_DONE); + ASSERT0(parent_interface->resolve_status == RESOLVE_DONE); Decl *inf = parent_interface->type->decl; if (!sema_analyse_decl(context, inf)) return false; add_interface_to_decl_stack(context, inf); @@ -149,7 +149,7 @@ static bool sema_find_decl_in_private_imports(SemaContext *context, NameResolve // No match, so continue if (!found) continue; - assert(found->visibility != VISIBLE_LOCAL); + ASSERT0(found->visibility != VISIBLE_LOCAL); // Did we already have a match? if (decl) @@ -393,7 +393,7 @@ static bool sema_resolve_path_symbol(SemaContext *context, NameResolve *name_res Decl *decl = NULL; name_resolve->path_found = NULL; name_resolve->found = NULL; - assert(name_resolve->path && "Expected path."); + ASSERT0(name_resolve->path && "Expected path."); const char *symbol = name_resolve->symbol; // 0. std module special handling. 
@@ -464,7 +464,7 @@ static inline Decl *sema_find_local(SemaContext *context, const char *symbol) static bool sema_resolve_no_path_symbol(SemaContext *context, NameResolve *name_resolve) { const char *symbol = name_resolve->symbol; - assert(name_resolve->path == NULL); + ASSERT0(name_resolve->path == NULL); Decl *decl; @@ -569,7 +569,7 @@ static int module_closest_ident_names(Module *module, const char *name, Decl* ma } static void sema_report_error_on_decl(SemaContext *context, NameResolve *name_resolve) { - assert(!name_resolve->suppress_error); + ASSERT0(!name_resolve->suppress_error); const char *symbol = name_resolve->symbol; SourceSpan span = name_resolve->span; Decl *found = name_resolve->found; @@ -615,7 +615,7 @@ static void sema_report_error_on_decl(SemaContext *context, NameResolve *name_re if (name_resolve->ambiguous_other_decl) { - assert(found); + ASSERT0(found); const char *symbol_type = decl_to_name(found); const char *found_path = found->unit->module->name->module; const char *other_path = name_resolve->ambiguous_other_decl->unit->module->name->module; @@ -643,7 +643,7 @@ static void sema_report_error_on_decl(SemaContext *context, NameResolve *name_re } return; } - assert(!found); + ASSERT0(!found); if (path_name) { // A common mistake is to type println and printfln @@ -778,7 +778,7 @@ Decl *sema_resolve_method_in_module(Module *module, Type *actual_type, const cha *private_found = found; found = NULL; } - assert(!found || found->visibility != VISIBLE_LOCAL); + ASSERT0(!found || found->visibility != VISIBLE_LOCAL); if (found && search_type == METHOD_SEARCH_CURRENT) return found; // We are now searching submodules, so hide the private ones. if (search_type == METHOD_SEARCH_CURRENT) search_type = METHOD_SEARCH_SUBMODULE_CURRENT; @@ -843,7 +843,7 @@ UNUSED bool sema_check_type_variable_array(SemaContext *context, TypeInfo *type_ } break; } - assert(type->type_kind == TYPE_STRUCT); + ASSERT0(type->type_kind == TYPE_STRUCT); if (type->decl->has_variable_array) { SEMA_ERROR(type_info, "Arrays of structs with flexible array members is not allowed."); @@ -903,7 +903,7 @@ bool sema_resolve_type_decl(SemaContext *context, Type *type) Decl *sema_resolve_type_method(CompilationUnit *unit, Type *type, const char *method_name, Decl **ambiguous_ref, Decl **private_ref) { - assert(type == type->canonical); + ASSERT0(type == type->canonical); Decl *private = NULL; Decl *ambiguous = NULL; Decl *found = sema_find_extension_method_in_list(unit->local_method_extensions, type, method_name); @@ -911,7 +911,7 @@ Decl *sema_resolve_type_method(CompilationUnit *unit, Type *type, const char *me if (ambiguous) { *ambiguous_ref = ambiguous; - assert(found); + ASSERT0(found); return found; } @@ -1083,7 +1083,7 @@ Decl *sema_resolve_symbol(SemaContext *context, const char *symbol, Path *path, }; if (!sema_resolve_symbol_common(context, &resolve)) return NULL; Decl *found = resolve.found; - assert(found); + ASSERT0(found); if (!decl_ok(found)) return NULL; return resolve.found; } @@ -1091,7 +1091,7 @@ Decl *sema_resolve_symbol(SemaContext *context, const char *symbol, Path *path, static inline void sema_append_local(SemaContext *context, Decl *decl) { - assert(!decl_is_ct_var(decl)); + ASSERT0(!decl_is_ct_var(decl)); Decl ***locals = &context->locals; size_t locals_size = vec_size(*locals); size_t current_local = context->active_scope.current_local; @@ -1112,7 +1112,7 @@ static inline void sema_append_local(SemaContext *context, Decl *decl) INLINE bool sema_add_ct_local(SemaContext *context, Decl *decl) 
{ - assert(decl_is_ct_var(decl)); + ASSERT0(decl_is_ct_var(decl)); Decl *other = sema_find_ct_local(context, decl->name); if (other) @@ -1164,7 +1164,7 @@ void sema_unwrap_var(SemaContext *context, Decl *decl) void sema_rewrap_var(SemaContext *context, Decl *decl) { - assert(decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_UNWRAPPED && decl->var.alias->type->type_kind == TYPE_OPTIONAL); + ASSERT0(decl->decl_kind == DECL_VAR && decl->var.kind == VARDECL_UNWRAPPED && decl->var.alias->type->type_kind == TYPE_OPTIONAL); sema_append_local(context, decl->var.alias); } @@ -1179,7 +1179,7 @@ void sema_erase_var(SemaContext *context, Decl *decl) void sema_erase_unwrapped(SemaContext *context, Decl *decl) { - assert(IS_OPTIONAL(decl)); + ASSERT0(IS_OPTIONAL(decl)); Decl *rewrapped = decl_copy(decl); rewrapped->var.kind = VARDECL_REWRAPPED; rewrapped->var.alias = decl; diff --git a/src/compiler/sema_passes.c b/src/compiler/sema_passes.c index 9322d6d0e..139b7429c 100644 --- a/src/compiler/sema_passes.c +++ b/src/compiler/sema_passes.c @@ -77,7 +77,7 @@ void sema_analysis_pass_process_imports(Module *module) { // 3. Begin analysis Decl *import = imports[i]; - assert(import->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(import->resolve_status == RESOLVE_NOT_DONE); import->resolve_status = RESOLVE_RUNNING; // 4. Find the module. Path *path = import->import.path; @@ -186,7 +186,7 @@ static Decl **sema_load_include(CompilationUnit *unit, Decl *decl) static bool exec_arg_append_to_scratch(Expr *arg) { - assert(expr_is_const(arg)); + ASSERT0(expr_is_const(arg)); switch (arg->const_expr.const_kind) { case CONST_FLOAT: @@ -272,7 +272,7 @@ static Decl **sema_run_exec(CompilationUnit *unit, Decl *decl) FOREACH_IDX(i, Expr *, arg, decl->exec_decl.args) { if (i) scratch_buffer_append(" "); - assert(expr_is_const(arg)); + ASSERT0(expr_is_const(arg)); if (!exec_arg_append_to_scratch(arg)) { RETURN_PRINT_ERROR_AT(NULL, arg, "Bytes, initializers and member references may not be used as arguments."); @@ -363,7 +363,7 @@ void sema_analysis_pass_register_global_declarations(Module *module) FOREACH(CompilationUnit *, unit, module->units) { if (unit->if_attr) continue; - assert(!unit->ct_includes); + ASSERT0(!unit->ct_includes); unit->module = module; DEBUG_LOG("Processing %s.", unit->file->name); register_global_decls(unit, unit->global_decls); @@ -379,7 +379,7 @@ void sema_analysis_pass_process_includes(Module *module) if (unit->if_attr) continue; // Process all includes sema_process_includes(unit); - assert(vec_size(unit->ct_includes) == 0); + ASSERT0(vec_size(unit->ct_includes) == 0); } DEBUG_LOG("Pass finished with %d error(s).", compiler.context.errors_found); @@ -418,7 +418,7 @@ void sema_analysis_pass_register_conditional_units(Module *module) FOREACH(CompilationUnit *, unit, module->units) { // All ct_includes should already be registered. - assert(!unit->ct_includes); + ASSERT0(!unit->ct_includes); Attr *if_attr = unit->if_attr; if (!if_attr && !unit->attr_links) continue; @@ -450,7 +450,7 @@ void sema_analysis_pass_register_conditional_units(Module *module) { Expr **exprs = attr->exprs; unsigned args = vec_size(exprs); - assert(args > 0 && "Should already have been checked."); + ASSERT0(args > 0 && "Should already have been checked."); Expr *cond = args > 1 ? attr->exprs[0] : NULL; if (cond && !sema_analyse_expr(&context, cond)) goto FAIL_CONTEXT; bool start = cond && expr_is_const_bool(cond) ? 
1 : 0; @@ -594,7 +594,7 @@ INLINE void sema_analyse_inner_func_ptr(SemaContext *c, Decl *decl) } if (inner->type_kind != TYPE_FUNC_PTR) return; Type *func = inner->pointer; - assert(func->type_kind == TYPE_FUNC_RAW); + ASSERT0(func->type_kind == TYPE_FUNC_RAW); if (!sema_resolve_type_decl(c, func)) decl_poison(decl); } diff --git a/src/compiler/sema_stmts.c b/src/compiler/sema_stmts.c index d0a764d21..bc3391a3e 100644 --- a/src/compiler/sema_stmts.c +++ b/src/compiler/sema_stmts.c @@ -184,7 +184,7 @@ static inline bool sema_analyse_break_stmt(SemaContext *context, Ast *statement) defer_begin = context->break_defer; } - assert(parent); + ASSERT0(parent); parent->flow.has_break = true; statement->contbreak_stmt.ast = astid(parent); @@ -257,10 +257,10 @@ static inline bool sema_analyse_continue_stmt(SemaContext *context, Ast *stateme **/ static void sema_unwrappable_from_catch_in_else(SemaContext *c, Expr *cond) { - assert(cond->expr_kind == EXPR_COND && "Assumed cond"); + ASSERT0(cond->expr_kind == EXPR_COND && "Assumed cond"); Expr *last = VECLAST(cond->cond_expr); - assert(last); + ASSERT0(last); // Dive into any cast, because it might have been cast into boolean. while (last->expr_kind == EXPR_CAST) @@ -278,7 +278,7 @@ static void sema_unwrappable_from_catch_in_else(SemaContext *c, Expr *cond) Decl *decl = expr->identifier_expr.decl; if (decl->decl_kind != DECL_VAR) continue; - assert(decl->type->type_kind == TYPE_OPTIONAL && "The variable should always be optional at this point."); + ASSERT0(decl->type->type_kind == TYPE_OPTIONAL && "The variable should always be optional at this point."); // Note that we could possibly have "if (catch x, x)" and in this case we'd // unwrap twice, but that isn't really a problem. @@ -306,7 +306,7 @@ static inline bool assert_create_from_contract(SemaContext *context, Ast *direct { directive = copy_ast_single(directive); Expr *declexpr = directive->contract_stmt.contract.decl_exprs; - assert(declexpr->expr_kind == EXPR_EXPRESSION_LIST); + ASSERT0(declexpr->expr_kind == EXPR_EXPRESSION_LIST); FOREACH(Expr *, expr, declexpr->expression_list) { @@ -382,7 +382,7 @@ static inline bool sema_check_return_matches_opt_returns(SemaContext *context, E if (!sema_cast_const(inner)) return true; // Here we have a const optional return. - assert(ret_expr->inner_expr->const_expr.const_kind == CONST_ERR); + ASSERT0(ret_expr->inner_expr->const_expr.const_kind == CONST_ERR); Decl *fault = ret_expr->inner_expr->const_expr.enum_err_val; // Check that we find it. @@ -401,7 +401,7 @@ static inline bool sema_check_return_matches_opt_returns(SemaContext *context, E static bool sema_analyse_macro_constant_ensures(SemaContext *context, Expr *ret_expr) { - assert(context->current_macro); + ASSERT0(context->current_macro); // This is a per return check, so we don't do it if the return expression is missing, // or if it is optional, or – obviously - if there are no '@ensure'. 
if (!ret_expr || !context->macro_has_ensures || IS_OPTIONAL(ret_expr)) return true; @@ -423,7 +423,7 @@ static bool sema_analyse_macro_constant_ensures(SemaContext *context, Expr *ret_ doc_directive = directive->next; if (directive->contract_stmt.kind != CONTRACT_ENSURE) continue; Expr *checks = copy_expr_single(directive->contract_stmt.contract.decl_exprs); - assert(checks->expr_kind == EXPR_EXPRESSION_LIST); + ASSERT0(checks->expr_kind == EXPR_EXPRESSION_LIST); Expr **exprs = checks->expression_list; FOREACH(Expr *, expr, exprs) { @@ -463,7 +463,7 @@ static bool sema_analyse_macro_constant_ensures(SemaContext *context, Expr *ret_ static inline bool sema_analyse_block_exit_stmt(SemaContext *context, Ast *statement) { bool is_macro = (context->active_scope.flags & SCOPE_MACRO) != 0; - assert(context->active_scope.flags & (SCOPE_EXPR_BLOCK | SCOPE_MACRO)); + ASSERT0(context->active_scope.flags & (SCOPE_EXPR_BLOCK | SCOPE_MACRO)); statement->ast_kind = AST_BLOCK_EXIT_STMT; context->active_scope.jump_end = true; Type *block_type = context->expected_block_type; @@ -577,7 +577,7 @@ static inline bool sema_analyse_return_stmt(SemaContext *context, Ast *statement context->active_scope.jump_end = true; Type *expected_rtype = context->rtype; - assert(expected_rtype && "We should always have known type from a function return."); + ASSERT0(expected_rtype && "We should always have known type from a function return."); Expr *return_expr = statement->return_stmt.expr; if (return_expr) @@ -634,7 +634,7 @@ static inline bool sema_analyse_return_stmt(SemaContext *context, Ast *statement } SKIP_ENSURE:; - assert(type_no_optional(statement->return_stmt.expr->type)->canonical == type_no_optional(expected_rtype)->canonical); + ASSERT0(type_no_optional(statement->return_stmt.expr->type)->canonical == type_no_optional(expected_rtype)->canonical); return true; } @@ -667,6 +667,7 @@ static inline bool sema_expr_valid_try_expression(Expr *expr) case EXPR_LAST_FAULT: case EXPR_TYPECALL: case EXPR_MEMBER_GET: + case EXPR_SPLAT: return false; case EXPR_BITACCESS: case EXPR_BUILTIN: @@ -698,7 +699,6 @@ static inline bool sema_expr_valid_try_expression(Expr *expr) case EXPR_SLICE: case EXPR_SLICE_ASSIGN: case EXPR_SLICE_COPY: - case EXPR_SPLAT: case EXPR_STRINGIFY: case EXPR_SUBSCRIPT: case EXPR_SWIZZLE: @@ -723,7 +723,7 @@ static inline bool sema_expr_valid_try_expression(Expr *expr) } static inline bool sema_analyse_try_unwrap(SemaContext *context, Expr *expr) { - assert(expr->expr_kind == EXPR_TRY_UNWRAP); + ASSERT0(expr->expr_kind == EXPR_TRY_UNWRAP); Expr *ident = expr->try_unwrap_expr.variable; Expr *optional = expr->try_unwrap_expr.init; @@ -783,7 +783,7 @@ static inline bool sema_analyse_try_unwrap(SemaContext *context, Expr *expr) // 3a. If we had a variable type, then our expression must be an identifier. 
if (ident->expr_kind != EXPR_IDENTIFIER) RETURN_SEMA_ERROR(ident, "A variable name was expected here."); - assert(ident->resolve_status != RESOLVE_DONE); + ASSERT0(ident->resolve_status != RESOLVE_DONE); if (ident->identifier_expr.path) RETURN_SEMA_ERROR(ident->identifier_expr.path, "The variable may not have a path."); if (ident->identifier_expr.is_const) RETURN_SEMA_ERROR(ident, "Expected a variable starting with a lower case letter."); const char *ident_name = ident->identifier_expr.ident; @@ -832,9 +832,9 @@ static inline bool sema_analyse_try_unwrap(SemaContext *context, Expr *expr) static inline bool sema_analyse_try_unwrap_chain(SemaContext *context, Expr *expr, CondType cond_type, CondResult *result) { - assert(cond_type == COND_TYPE_UNWRAP_BOOL || cond_type == COND_TYPE_UNWRAP); + ASSERT0(cond_type == COND_TYPE_UNWRAP_BOOL || cond_type == COND_TYPE_UNWRAP); - assert(expr->expr_kind == EXPR_TRY_UNWRAP_CHAIN); + ASSERT0(expr->expr_kind == EXPR_TRY_UNWRAP_CHAIN); FOREACH(Expr *, chain_element, expr->try_unwrap_chain_expr) { @@ -877,7 +877,7 @@ static inline bool sema_analyse_catch_unwrap(SemaContext *context, Expr *expr) RETURN_SEMA_ERROR(ident, "A variable name was expected here."); } - assert(ident->resolve_status != RESOLVE_DONE); + ASSERT0(ident->resolve_status != RESOLVE_DONE); if (ident->identifier_expr.path) RETURN_SEMA_ERROR(ident->identifier_expr.path, "The variable may not have a path."); if (ident->identifier_expr.is_const) RETURN_SEMA_ERROR(ident, "Expected a variable starting with a lower case letter."); @@ -907,7 +907,7 @@ RESOLVE_EXPRS:; static void sema_remove_unwraps_from_try(SemaContext *c, Expr *cond) { - assert(cond->expr_kind == EXPR_COND); + ASSERT0(cond->expr_kind == EXPR_COND); Expr *last = VECLAST(cond->cond_expr); if (!last || last->expr_kind != EXPR_TRY_UNWRAP_CHAIN) return; FOREACH(Expr *, expr, last->try_unwrap_chain_expr) @@ -1014,7 +1014,7 @@ NORMAL_EXPR:; */ static inline bool sema_analyse_cond_list(SemaContext *context, Expr *expr, CondType cond_type, CondResult *result) { - assert(expr->expr_kind == EXPR_COND); + ASSERT0(expr->expr_kind == EXPR_COND); Expr **dexprs = expr->cond_expr; unsigned entries = vec_size(dexprs); @@ -1055,7 +1055,7 @@ static inline bool sema_analyse_cond_list(SemaContext *context, Expr *expr, Cond static inline bool sema_analyse_cond(SemaContext *context, Expr *expr, CondType cond_type, CondResult *result) { bool cast_to_bool = cond_type == COND_TYPE_UNWRAP_BOOL; - assert(expr->expr_kind == EXPR_COND && "Conditional expressions should always be of type EXPR_DECL_LIST"); + ASSERT0(expr->expr_kind == EXPR_COND && "Conditional expressions should always be of type EXPR_DECL_LIST"); // 1. Analyse the declaration list. 
ScopeFlags current_flags = context->active_scope.flags; @@ -1274,7 +1274,7 @@ static inline bool sema_analyse_for_stmt(SemaContext *context, Ast *statement) bool is_infinite = false; Ast *body = astptr(statement->for_stmt.body); - assert(body); + ASSERT0(body); if (body->ast_kind == AST_DEFER_STMT) { RETURN_SEMA_ERROR(body, "Looping over a raw 'defer' is not allowed, was this a mistake?"); @@ -1327,7 +1327,7 @@ static inline bool sema_analyse_for_stmt(SemaContext *context, Ast *statement) // Rewrite do { } while(true) to while(true) { } if (is_infinite) { - assert(!statement->for_stmt.cond); + ASSERT0(!statement->for_stmt.cond); statement->for_stmt.flow.skip_first = false; } } @@ -1404,7 +1404,7 @@ static inline bool sema_analyse_foreach_stmt(SemaContext *context, Ast *statemen if (statement->foreach_stmt.index_by_ref) { - assert(index); + ASSERT0(index); RETURN_SEMA_ERROR(index, "The index cannot be held by reference, did you accidentally add a '&'?"); } @@ -1432,7 +1432,7 @@ static inline bool sema_analyse_foreach_stmt(SemaContext *context, Ast *statemen } // At this point we should have dereferenced any pointer or bailed. - assert(!type_is_pointer(enumerator->type)); + ASSERT0(!type_is_pointer(enumerator->type)); // Check that we can even index this expression. @@ -1464,7 +1464,7 @@ static inline bool sema_analyse_foreach_stmt(SemaContext *context, Ast *statemen } if (!decl_ok(len) || !decl_ok(by_val) || !decl_ok(by_ref)) return false; index_macro = value_by_ref ? by_ref : by_val; - assert(index_macro); + ASSERT0(index_macro); index_type = index_macro->func_decl.signature.params[1]->type; if (!type_is_integer(index_type)) { @@ -1513,7 +1513,7 @@ SKIP_OVERLOAD:; // We either have "foreach (x : some_var)" or "foreach (x : some_call())" // So we grab the former by address (implicit &) and the latter as the value. 
- assert(enumerator->resolve_status == RESOLVE_DONE); + ASSERT0(enumerator->resolve_status == RESOLVE_DONE); bool is_addr = false; bool is_variable = false; if (enumerator->expr_kind == EXPR_IDENTIFIER) @@ -1995,7 +1995,7 @@ static bool sema_analyse_nextcase_stmt(SemaContext *context, Ast *statement) statement->nextcase_stmt.switch_expr = NULL; if (!value) { - assert(context->next_target); + ASSERT0(context->next_target); statement->nextcase_stmt.defer_id = context_get_defers(context, context->active_scope.defer_last, parent->switch_stmt.defer, true); statement->nextcase_stmt.case_switch_stmt = astid(context->next_target); return true; @@ -2077,7 +2077,7 @@ static inline bool sema_analyse_then_overwrite(SemaContext *context, Ast *statem AstId next = statement->next; *statement = *astptr(replacement); AstId current = astid(statement); - assert(current); + ASSERT0(current); while (current) { Ast *ast = ast_next(&current); @@ -2115,7 +2115,7 @@ static inline bool sema_analyse_ct_if_stmt(SemaContext *context, Ast *statement) if (sema_analyse_then_overwrite(context, statement, elif->ct_else_stmt)) goto SUCCESS; goto FAILED; } - assert(elif->ast_kind == AST_CT_IF_STMT); + ASSERT0(elif->ast_kind == AST_CT_IF_STMT); res = sema_check_comp_time_bool(context, elif->ct_if_stmt.expr); if (res == COND_MISSING) goto FAILED; @@ -2179,7 +2179,7 @@ static inline bool sema_check_type_case(SemaContext *context, Type *switch_type, static inline bool sema_check_value_case(SemaContext *context, Type *switch_type, Ast *case_stmt, Ast **cases, unsigned index, bool *if_chained, bool *max_ranged) { - assert(switch_type); + ASSERT0(switch_type); Expr *expr = exprptr(case_stmt->case_stmt.expr); Expr *to_expr = exprptrzero(case_stmt->case_stmt.to_expr); @@ -2256,7 +2256,7 @@ INLINE const char *create_missing_enums_in_switch_error(Ast **cases, unsigned ca for (unsigned i = 0; i < case_count; i++) { Expr *e = exprptr(cases[i]->case_stmt.expr); - assert(expr_is_const_enum(e)); + ASSERT0(expr_is_const_enum(e)); if (e->const_expr.enum_err_val == decl) goto CONTINUE; } if (++printed != 1) @@ -2296,7 +2296,7 @@ static bool sema_analyse_switch_body(SemaContext *context, Ast *statement, Sourc bool if_chain = !is_enum_switch && !type_kind_is_any_integer(flat_switch_type_kind); Ast *default_case = NULL; - assert(context->active_scope.defer_start == context->active_scope.defer_last); + ASSERT0(context->active_scope.defer_start == context->active_scope.defer_last); bool exhaustive = false; unsigned case_count = vec_size(cases); @@ -2437,7 +2437,7 @@ static inline bool sema_analyse_ct_switch_stmt(SemaContext *context, Ast *statem if (expr_is_const_string(cond)) break; FALLTHROUGH; default: - assert(cond); + ASSERT0(cond); SEMA_ERROR(cond, "Only types, strings, enums, integers, floats and booleans may be used with '$switch'."); // NOLINT goto FAILED; } @@ -2446,7 +2446,7 @@ static inline bool sema_analyse_ct_switch_stmt(SemaContext *context, Ast *statem Ast **cases = statement->ct_switch_stmt.body; unsigned case_count = vec_size(cases); - assert(case_count <= INT32_MAX); + ASSERT0(case_count <= INT32_MAX); int matched_case = (int)case_count; int default_case = (int)case_count; @@ -2526,7 +2526,7 @@ static inline bool sema_analyse_ct_switch_stmt(SemaContext *context, Ast *statem } if (is_type) { - assert(const_expr == const_to_expr); + ASSERT0(const_expr == const_to_expr); Type *switch_type = switch_expr_const->typeid; Type *case_type = const_expr->typeid; if (matched_case > i && type_is_subtype(case_type->canonical,
switch_type->canonical)) @@ -2828,7 +2828,7 @@ static inline bool sema_analyse_ct_for_stmt(SemaContext *context, Ast *statement if ((init = statement->for_stmt.init)) { Expr *init_expr = exprptr(init); - assert(init_expr->expr_kind == EXPR_EXPRESSION_LIST); + ASSERT0(init_expr->expr_kind == EXPR_EXPRESSION_LIST); // Check the list of expressions. FOREACH(Expr *, expr, init_expr->expression_list) @@ -2855,7 +2855,7 @@ static inline bool sema_analyse_ct_for_stmt(SemaContext *context, Ast *statement AstId start = 0; AstId *current = &start; Expr **incr_list = incr ? exprptr(incr)->expression_list : NULL; - assert(condition); + ASSERT0(condition); // We set a maximum of macro iterations. // we might consider reducing this. unsigned current_ct_scope = sema_context_push_ct_stack(context); @@ -3001,7 +3001,7 @@ static bool sema_analyse_require(SemaContext *context, Ast *directive, AstId **a static bool sema_analyse_ensure(SemaContext *context, Ast *directive) { Expr *declexpr = directive->contract_stmt.contract.decl_exprs; - assert(declexpr->expr_kind == EXPR_EXPRESSION_LIST); + ASSERT0(declexpr->expr_kind == EXPR_EXPRESSION_LIST); FOREACH(Expr *, expr, declexpr->expression_list) { @@ -3054,7 +3054,7 @@ NEXT:; void sema_append_contract_asserts(AstId assert_first, Ast* compound_stmt) { - assert(compound_stmt->ast_kind == AST_COMPOUND_STMT); + ASSERT0(compound_stmt->ast_kind == AST_COMPOUND_STMT); if (!assert_first) return; Ast *ast = new_ast(AST_COMPOUND_STMT, compound_stmt->span); ast->compound_stmt.first_stmt = assert_first; @@ -3096,7 +3096,7 @@ bool sema_analyse_function_body(SemaContext *context, Decl *func) Signature *signature = &func->func_decl.signature; FunctionPrototype *prototype = func->type->function.prototype; - assert(prototype); + ASSERT0(prototype); context->original_inline_line = 0; context->original_module = NULL; context->call_env = (CallEnv) { @@ -3121,11 +3121,11 @@ bool sema_analyse_function_body(SemaContext *context, Decl *func) context->next_target = 0; context->next_switch = 0; context->break_target = 0; - assert(func->func_decl.body); + ASSERT0(func->func_decl.body); Ast *body = astptr(func->func_decl.body); Decl **lambda_params = NULL; SCOPE_START - assert(context->active_scope.depth == 1); + ASSERT0(context->active_scope.depth == 1); FOREACH(Decl *, param, signature->params) { if (!sema_add_local(context, param)) return false; @@ -3148,7 +3148,7 @@ bool sema_analyse_function_body(SemaContext *context, Decl *func) if (!is_naked) sema_append_contract_asserts(assert_first, body); Type *canonical_rtype = type_no_optional(prototype->rtype)->canonical; if (!sema_analyse_compound_statement_no_scope(context, body)) return false; - assert(context->active_scope.depth == 1); + ASSERT0(context->active_scope.depth == 1); if (!context->active_scope.jump_end && canonical_rtype != type_void) { SEMA_ERROR(func, "Missing return statement at the end of the function."); diff --git a/src/compiler/sema_types.c b/src/compiler/sema_types.c index 7fcc15ad4..652115ec4 100644 --- a/src/compiler/sema_types.c +++ b/src/compiler/sema_types.c @@ -186,7 +186,7 @@ static inline bool sema_resolve_array_type(SemaContext *context, TypeInfo *type, default: UNREACHABLE } - assert(!type->array.len || sema_cast_const(type->array.len)); + ASSERT0(!type->array.len || sema_cast_const(type->array.len)); type->resolve_status = RESOLVE_DONE; return true; } @@ -248,8 +248,8 @@ static bool sema_resolve_type_identifier(SemaContext *context, TypeInfo *type_in SEMA_ERROR(type_info, "You need to assign a type to '%s' 
before using it.", decl->name); return false; } - assert(decl->var.init_expr->expr_kind == EXPR_TYPEINFO); - assert(decl->var.init_expr->resolve_status == RESOLVE_DONE); + ASSERT0(decl->var.init_expr->expr_kind == EXPR_TYPEINFO); + ASSERT0(decl->var.init_expr->resolve_status == RESOLVE_DONE); *type_info = *decl->var.init_expr->type_expr; return true; } @@ -371,7 +371,7 @@ INLINE bool sema_resolve_generic_type(SemaContext *context, TypeInfo *type_info) { RETURN_SEMA_ERROR(inner, "Parameterization required a concrete type name here."); } - assert(inner->resolve_status == RESOLVE_NOT_DONE); + ASSERT0(inner->resolve_status == RESOLVE_NOT_DONE); bool was_recursive = false; Decl *type = sema_analyse_parameterized_identifier(context, inner->unresolved.path, inner->unresolved.name, @@ -513,7 +513,7 @@ FuncMap map; void type_func_prototype_init(uint32_t capacity) { - assert(is_power_of_two(capacity) && capacity > 1); + ASSERT0(is_power_of_two(capacity) && capacity > 1); map.entries = CALLOC(capacity * sizeof(FuncTypeEntry)); map.capacity = capacity; map.max_load = (uint32_t)(TABLE_MAX_LOAD * capacity); diff --git a/src/compiler/semantic_analyser.c b/src/compiler/semantic_analyser.c index ccc0b7a26..2a771e5b6 100644 --- a/src/compiler/semantic_analyser.c +++ b/src/compiler/semantic_analyser.c @@ -124,7 +124,7 @@ void context_pop_defers_and_replace_ast(SemaContext *context, Ast *ast) AstId defer_first = 0; context_pop_defers(context, &defer_first); if (!defer_first) return; - assert(ast->ast_kind != AST_COMPOUND_STMT); + ASSERT0(ast->ast_kind != AST_COMPOUND_STMT); Ast *replacement = ast_copy(ast); ast->ast_kind = AST_COMPOUND_STMT; ast->compound_stmt = (AstCompoundStmt) { .first_stmt = astid(replacement) }; @@ -252,7 +252,7 @@ static void register_generic_decls(CompilationUnit *unit, Decl **decls) static void analyze_generic_module(Module *module) { - assert(module->parameters && module->is_generic); + ASSERT0(module->parameters && module->is_generic); FOREACH(CompilationUnit *, unit, module->units) { register_generic_decls(unit, unit->global_decls); diff --git a/src/compiler/source_file.c b/src/compiler/source_file.c index 5857eacc1..ea406c9dc 100644 --- a/src/compiler/source_file.c +++ b/src/compiler/source_file.c @@ -18,7 +18,7 @@ static const size_t LEXER_FILES_START_CAPACITY = 128; File *source_file_by_id(FileId file) { if (file == STDIN_FILE_ID) return &stdin_file; - assert(file < vec_size(compiler.context.loaded_sources)); + ASSERT0(file < vec_size(compiler.context.loaded_sources)); return compiler.context.loaded_sources[file]; } diff --git a/src/compiler/symtab.c b/src/compiler/symtab.c index 045bdcb49..e440a7a4e 100644 --- a/src/compiler/symtab.c +++ b/src/compiler/symtab.c @@ -106,8 +106,8 @@ void symtab_init(uint32_t capacity) default: break; } - assert(type == i); - assert(symtab_add(name, (uint32_t)strlen(name), fnv1a(name, len), &type) == interned); + ASSERT0(type == i); + ASSERT0(symtab_add(name, (uint32_t)strlen(name), fnv1a(name, len), &type) == interned); } // Init some constant idents @@ -292,17 +292,17 @@ void symtab_init(uint32_t capacity) for (unsigned i = 0; i < NUMBER_OF_BUILTINS; i++) { - assert(builtin_list[i] && "Missing builtin"); + ASSERT0(builtin_list[i] && "Missing builtin"); } for (unsigned i = 0; i < NUMBER_OF_TYPE_PROPERTIES; i++) { - assert(type_property_list[i] && "Missing type property"); + ASSERT0(type_property_list[i] && "Missing type property"); } for (unsigned i = 0; i < NUMBER_OF_BUILTIN_DEFINES; i++) { - assert(builtin_defines[i] && "Missing builtin 
define"); + ASSERT0(builtin_defines[i] && "Missing builtin define"); } type = TOKEN_AT_IDENT; @@ -365,7 +365,7 @@ void symtab_init(uint32_t capacity) for (unsigned i = 0; i < NUMBER_OF_ATTRIBUTES; i++) { - assert(attribute_list[i] && "Missing attributes"); + ASSERT0(attribute_list[i] && "Missing attributes"); } } @@ -392,7 +392,7 @@ const char *symtab_preset(const char *data, TokenType type) uint32_t len = (uint32_t)strlen(data); TokenType result = type; const char *res = symtab_add(data, len, fnv1a(data, len), &result); - assert(result == type); + ASSERT0(result == type); return res; } @@ -432,7 +432,7 @@ const char *symtab_add(const char *data, uint32_t len, uint32_t fnv1hash, TokenT void stable_init(STable *table, uint32_t initial_size) { - assert(initial_size && "Size must be larger than 0"); + ASSERT0(initial_size && "Size must be larger than 0"); assert (is_power_of_two(initial_size) && "Must be a power of two"); SEntry *entries = CALLOC(initial_size * sizeof(Entry)); @@ -487,7 +487,7 @@ static inline void stable_resize(STable *table) void *stable_set(STable *table, const char *key, void *value) { - assert(value && "Cannot insert NULL"); + ASSERT0(value && "Cannot insert NULL"); SEntry *entry = sentry_find(table->entries, table->capacity, key); void *old = entry->value; if (old == value) return old; @@ -516,7 +516,7 @@ void *stable_get(STable *table, const char *key) void htable_init(HTable *table, uint32_t initial_size) { - assert(initial_size && "Size must be larger than 0"); + ASSERT0(initial_size && "Size must be larger than 0"); size_t size = next_highest_power_of_2(initial_size); size_t mem_size = initial_size * sizeof(HTEntry); @@ -527,7 +527,7 @@ void htable_init(HTable *table, uint32_t initial_size) void *htable_set(HTable *table, void *key, void *value) { - assert(value && "Cannot insert NULL"); + ASSERT0(value && "Cannot insert NULL"); uint32_t idx = (((uintptr_t)key) ^ ((uintptr_t)key) >> 8) & table->mask; HTEntry **entry_ref = &table->entries[idx]; HTEntry *entry = *entry_ref; diff --git a/src/compiler/target.c b/src/compiler/target.c index 3fcb48909..b83e3cc43 100644 --- a/src/compiler/target.c +++ b/src/compiler/target.c @@ -1873,7 +1873,7 @@ void target_setup(BuildTarget *target) } compiler.platform.target_triple = arch_to_target_triple[target->arch_os_target]; - assert(compiler.platform.target_triple); + ASSERT0(compiler.platform.target_triple); compiler.platform.alloca_address_space = 0; @@ -2094,7 +2094,7 @@ void target_setup(BuildTarget *target) compiler.platform.target_triple = strdup(llvm_macos_target_triple(compiler.platform.target_triple)); } - assert(compiler.platform.reloc_model != RELOC_DEFAULT); + ASSERT0(compiler.platform.reloc_model != RELOC_DEFAULT); // TODO remove type_setup(&compiler.platform); diff --git a/src/compiler/types.c b/src/compiler/types.c index 56895a17d..bc3a8ba26 100644 --- a/src/compiler/types.c +++ b/src/compiler/types.c @@ -303,7 +303,7 @@ const char *type_to_error_string(Type *type) bool type_is_matching_int(CanonicalType *type1, CanonicalType *type2) { - assert(type1->canonical == type1 && type2->canonical == type2); + ASSERT0(type1->canonical == type1 && type2->canonical == type2); TypeKind typekind1 = type1->type_kind; TypeKind typekind2 = type2->type_kind; if (typekind1 == typekind2) return type_kind_is_any_integer(typekind1); @@ -318,11 +318,11 @@ TypeSize type_size(Type *type) switch (type->type_kind) { case TYPE_BITSTRUCT: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == 
RESOLVE_DONE); type = type->decl->strukt.container_type->type; goto RETRY; case TYPE_DISTINCT: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == RESOLVE_DONE); type = type->decl->distinct->type; goto RETRY; case TYPE_VECTOR: @@ -346,12 +346,12 @@ TypeSize type_size(Type *type) type = type_iptr->canonical; goto RETRY; case TYPE_ENUM: - assert(type->decl->enums.type_info->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->enums.type_info->resolve_status == RESOLVE_DONE); type = type->decl->enums.type_info->type->canonical; goto RETRY; case TYPE_STRUCT: case TYPE_UNION: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == RESOLVE_DONE); return type->decl->strukt.size; case TYPE_VOID: return 1; @@ -377,7 +377,7 @@ TypeSize type_size(Type *type) FunctionPrototype *type_get_resolved_prototype(Type *type) { - assert(type->type_kind == TYPE_FUNC_RAW); + ASSERT0(type->type_kind == TYPE_FUNC_RAW); FunctionPrototype *prototype = type->function.prototype; if (!prototype->is_resolved) c_abi_func_create(prototype); return prototype; @@ -466,7 +466,7 @@ bool type_is_abi_aggregate(Type *type) Type *type_find_largest_union_element(Type *type) { - assert(type->type_kind == TYPE_UNION); + ASSERT0(type->type_kind == TYPE_UNION); ByteSize largest = 0; Type *largest_type = NULL; FOREACH(Decl *, member, type->decl->strukt.members) @@ -652,7 +652,7 @@ void type_mangle_introspect_name_to_buffer(Type *type) bool type_func_match(Type *fn_type, Type *rtype, unsigned arg_count, ...) { - assert(type_is_func_ptr(fn_type)); + ASSERT0(type_is_func_ptr(fn_type)); Signature *sig = fn_type->pointer->function.signature; if (rtype->canonical != typeget(sig->rtype)->canonical) return false; if (vec_size(sig->params) != arg_count) return false; @@ -715,7 +715,7 @@ AlignSize type_abi_alignment(Type *type) goto RETRY; case TYPE_STRUCT: case TYPE_UNION: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == RESOLVE_DONE); return type->decl->alignment; UNREACHABLE case TYPE_BOOL: @@ -744,7 +744,7 @@ AlignSize type_abi_alignment(Type *type) static inline void create_type_cache(Type *type) { - assert(type->type_cache == NULL); + ASSERT0(type->type_cache == NULL); for (int i = 0; i < ARRAY_OFFSET; i++) { vec_add(type->type_cache, NULL); @@ -921,14 +921,14 @@ Type *type_get_ptr_recurse(Type *ptr_type) } Type *type_get_ptr(Type *ptr_type) { - assert(ptr_type->type_kind != TYPE_FUNC_RAW); - assert(!type_is_optional(ptr_type)); + ASSERT0(ptr_type->type_kind != TYPE_FUNC_RAW); + ASSERT0(!type_is_optional(ptr_type)); return type_generate_ptr(ptr_type, false); } Type *type_get_func_ptr(Type *func_type) { - assert(func_type->type_kind == TYPE_FUNC_RAW); + ASSERT0(func_type->type_kind == TYPE_FUNC_RAW); if (func_type->func_ptr) return func_type->func_ptr; Type *type = func_type->func_ptr = type_new(TYPE_FUNC_PTR, func_type->name); type->pointer = func_type; @@ -938,38 +938,38 @@ Type *type_get_func_ptr(Type *func_type) Type *type_get_optional(Type *optional_type) { - assert(!type_is_optional(optional_type)); + ASSERT0(!type_is_optional(optional_type)); return type_generate_optional(optional_type, false); } Type *type_get_slice(Type *arr_type) { - assert(type_is_valid_for_array(arr_type)); + ASSERT0(type_is_valid_for_array(arr_type)); return type_generate_slice(arr_type, false); } Type *type_get_inferred_array(Type *arr_type) { - assert(type_is_valid_for_array(arr_type)); + ASSERT0(type_is_valid_for_array(arr_type)); 
return type_generate_inferred_array(arr_type, false); } Type *type_get_inferred_vector(Type *arr_type) { - assert(type_is_valid_for_array(arr_type)); + ASSERT0(type_is_valid_for_array(arr_type)); return type_generate_inferred_vector(arr_type, false); } Type *type_get_flexible_array(Type *arr_type) { - assert(type_is_valid_for_array(arr_type)); + ASSERT0(type_is_valid_for_array(arr_type)); return type_generate_flexible_array(arr_type, false); } static inline bool array_structurally_equivalent_to_struct(Type *array, Type *type) { - assert(array->type_kind == TYPE_ARRAY); + ASSERT0(array->type_kind == TYPE_ARRAY); ArrayIndex len = (ArrayIndex)array->array.len; if (!len) return type_size(type) == 0; @@ -978,7 +978,7 @@ static inline bool array_structurally_equivalent_to_struct(Type *array, Type *ty if (len == 1 && type_is_structurally_equivalent(base, type)) return true; - assert(type->type_kind != TYPE_UNION && "Does not work on unions"); + ASSERT0(type->type_kind != TYPE_UNION && "Does not work on unions"); if (!type_is_union_or_strukt(type)) return false; @@ -1154,7 +1154,7 @@ static Type *type_create_array(Type *element_type, ArraySize len, bool vector, b Type *type_get_array(Type *arr_type, ArraySize len) { - assert(type_is_valid_for_array(arr_type)); + ASSERT0(type_is_valid_for_array(arr_type)); return type_create_array(arr_type, len, false, false); } @@ -1173,7 +1173,7 @@ bool type_is_valid_for_vector(Type *type) case TYPE_ANYFAULT: return true; case TYPE_DISTINCT: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == RESOLVE_DONE); type = type->decl->distinct->type; goto RETRY; case TYPE_TYPEDEF: @@ -1190,7 +1190,7 @@ bool type_is_valid_for_array(Type *type) switch (type->type_kind) { case TYPE_DISTINCT: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == RESOLVE_DONE); type = type->decl->distinct->type; goto RETRY; case TYPE_ANY: @@ -1213,7 +1213,7 @@ bool type_is_valid_for_array(Type *type) case TYPE_VECTOR: return true; case TYPE_TYPEDEF: - assert(type->decl->resolve_status == RESOLVE_DONE); + ASSERT0(type->decl->resolve_status == RESOLVE_DONE); type = type->canonical; goto RETRY; case TYPE_FLEXIBLE_ARRAY: @@ -1242,14 +1242,14 @@ Type *type_get_vector_bool(Type *original_type) Type *type_get_vector(Type *vector_type, unsigned len) { - assert(type_is_valid_for_vector(vector_type)); + ASSERT0(type_is_valid_for_vector(vector_type)); return type_create_array(vector_type, len, true, false); } static void type_create(const char *name, Type *location, TypeKind kind, unsigned bitsize, unsigned align, unsigned pref_align) { - assert(align); + ASSERT0(align); unsigned byte_size = (bitsize + 7) / 8; *location = (Type) { .type_kind = kind, @@ -1267,7 +1267,7 @@ static void type_create(const char *name, Type *location, TypeKind kind, unsigne static void type_init(const char *name, Type *location, TypeKind kind, unsigned bitsize, AlignData align) { - assert(align.align); + ASSERT0(align.align); unsigned byte_size = (bitsize + 7) / 8; *location = (Type) { .type_kind = kind, @@ -1488,7 +1488,7 @@ bool type_is_scalar(Type *type) Type *type_find_parent_type(Type *type) { - assert(type->canonical); + ASSERT0(type->canonical); switch (type->type_kind) { case TYPE_DISTINCT: @@ -1514,7 +1514,7 @@ Type *type_find_parent_type(Type *type) */ bool type_is_subtype(Type *type, Type *possible_subtype) { - assert(type == type->canonical); + ASSERT0(type == type->canonical); while (possible_subtype) { possible_subtype = 
possible_subtype->canonical;
@@ -1587,7 +1587,7 @@ static TypeCmpResult type_array_is_equivalent(SemaContext *context, Type *from,
 	switch (from->type_kind)
 	{
 		case TYPE_INFERRED_ARRAY:
-			assert(to_kind != TYPE_INFERRED_ARRAY);
+			ASSERT0(to_kind != TYPE_INFERRED_ARRAY);
 			if (to_kind != TYPE_ARRAY) return TYPE_MISMATCH;
 			return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit);
 		case TYPE_ARRAY:
@@ -1595,7 +1595,7 @@ static TypeCmpResult type_array_is_equivalent(SemaContext *context, Type *from,
 			if (to->type_kind == TYPE_ARRAY && from->array.len != to->array.len) return TYPE_MISMATCH;
 			return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit);
 		case TYPE_INFERRED_VECTOR:
-			assert(to_kind != TYPE_INFERRED_VECTOR);
+			ASSERT0(to_kind != TYPE_INFERRED_VECTOR);
 			if (to->type_kind != TYPE_VECTOR) return TYPE_MISMATCH;
 			return type_array_element_is_equivalent(context, from->array.base, to->array.base, is_explicit);
 		case TYPE_VECTOR:
@@ -1627,8 +1627,14 @@ TypeCmpResult type_array_element_is_equivalent(SemaContext *context, Type *eleme
 	}
 	switch (element1->type_kind)
 	{
+		case TYPE_FUNC_PTR:
+			if (element2 == type_voidptr) return TYPE_SAME;
+			if (element2->type_kind != TYPE_FUNC_PTR) return TYPE_MISMATCH;
+			if (element1->pointer->function.prototype->raw_type == element2->pointer->function.prototype->raw_type) return TYPE_SAME;
+			return TYPE_MISMATCH;
 		case TYPE_POINTER:
-			if (element2->type_kind != TYPE_POINTER) return TYPE_MISMATCH;
+			if (element2->type_kind == TYPE_FUNC_PTR && type_voidptr == element1) return TYPE_SAME;
+			if (!type_is_pointer(element2)) return TYPE_MISMATCH;
 			return type_is_pointer_equivalent(context, element1, element2, is_explicit);
 		case TYPE_STRUCT:
 			if (is_explicit) return type_is_structurally_equivalent(element1, element2) ? TYPE_SAME : TYPE_MISMATCH;
@@ -1775,8 +1781,8 @@ Type *type_find_max_num_type(Type *num_type, Type *other_num)
 {
 	TypeKind kind = num_type->type_kind;
 	TypeKind other_kind = other_num->type_kind;
-	assert(kind <= other_kind && "Expected ordering");
-	assert(kind != other_kind);
+	ASSERT0(kind <= other_kind && "Expected ordering");
+	ASSERT0(kind != other_kind);

 	// 1. The only conversions need to happen if the other type is a number.
 	if (other_kind < TYPE_INTEGER_FIRST || other_kind > TYPE_FLOAT_LAST) return NULL;
@@ -1799,7 +1805,7 @@ Type *type_find_max_num_type(Type *num_type, Type *other_num)
 	}

 	// Handle integer <=> integer conversions.
-	assert(type_kind_is_any_integer(other_kind) && type_is_integer(num_type));
+	ASSERT0(type_kind_is_any_integer(other_kind) && type_is_integer(num_type));

 	// 4. Check the bit sizes.
 	unsigned other_bit_size = other_num->builtin.bitsize;
@@ -1880,7 +1886,7 @@ static inline Type *type_find_max_ptr_type(Type *type, Type *other)

 Type *type_decay_array_pointer(Type *type)
 {
-	assert(type->type_kind == TYPE_POINTER);
+	ASSERT0(type->type_kind == TYPE_POINTER);
 	Type *ptr = type->pointer;
 	switch (ptr->type_kind)
 	{
@@ -1894,9 +1900,9 @@ Type *type_decay_array_pointer(Type *type)
 #define MAX_SEARCH_DEPTH 512
 static inline Type *type_find_max_distinct_type(Type *left, Type *right)
 {
-	assert(left == left->canonical && right == right->canonical);
-	assert(left != right);
-	assert(left->type_kind == TYPE_DISTINCT && right->type_kind == TYPE_DISTINCT);
+	ASSERT0(left == left->canonical && right == right->canonical);
+	ASSERT0(left != right);
+	ASSERT0(left->type_kind == TYPE_DISTINCT && right->type_kind == TYPE_DISTINCT);
 	static Type *left_types[MAX_SEARCH_DEPTH];
 	int depth = 0;
 	while (depth < MAX_SEARCH_DEPTH)
@@ -1910,7 +1916,7 @@ static inline Type *type_find_max_distinct_type(Type *left, Type *right)
 	{
 		error_exit("Common ancestor search depth %d exceeded.", MAX_SEARCH_DEPTH);
 	}
-	assert(left != right);
+	ASSERT0(left != right);
 	while (true)
 	{
 		for (int i = 0; i < depth; i++)
@@ -1927,7 +1933,7 @@ Type *type_find_max_type(Type *type, Type *other)
 {
 	type = type->canonical;
 	other = other->canonical;
-	assert(!type_is_optional(type) && !type_is_optional(other));
+	ASSERT0(!type_is_optional(type) && !type_is_optional(other));

 	RETRY_DISTINCT:
 	if (type == other) return type;
diff --git a/src/main.c b/src/main.c
index d2b7b31de..7797c6552 100644
--- a/src/main.c
+++ b/src/main.c
@@ -12,7 +12,7 @@ jmp_buf on_error_jump;

 NORETURN void exit_compiler(int exit_value)
 {
-	assert(exit_value != 0);
+	ASSERT0(exit_value != 0);
 	longjmp(on_error_jump, exit_value);
 }

diff --git a/src/utils/common.h b/src/utils/common.h
index 8c6419404..3d7da91c4 100644
--- a/src/utils/common.h
+++ b/src/utils/common.h
@@ -104,9 +104,14 @@
 	} while (0)
 #endif

-#define FATAL_ERROR(_string, ...) do { error_exit("FATAL ERROR %s -> in %s @ in %s:%d ", _string, __func__, __FILE__, __LINE__, ##__VA_ARGS__); } while(0)
-
-#define ASSERT(_condition, _string, ...) while (!(_condition)) { FATAL_ERROR(_string, ##__VA_ARGS__); }
+#define FATAL_ERROR(_string, ...) do { error_exit("\xe2\x9a\xa0\xef\xb8\x8f The compiler encountered an unexpected error: \"%s\".\n\n" \
+	"- Function: %s(...)\n" \
+	"- Source file: %s:%d\n\n" \
+	"\xf0\x9f\x99\x8f Please consider taking the time to file an issue on GitHub, so that we can get it fixed:\n\n" \
+	"https://github.com/c3lang/c3c/issues/new", _string, __func__, __FILE__, __LINE__, ##__VA_ARGS__); } while(0)
+
+#define ASSERT(_condition, _string) while (!(_condition)) { FATAL_ERROR(_string); }
+#define ASSERT0(_condition) while (!(_condition)) { FATAL_ERROR("Violated assert: " #_condition); }

 #define WARNING(_string, ...)
do { eprintf("WARNING: "); eprintf(_string, ##__VA_ARGS__); eprintf("\n"); } while(0) #define UNREACHABLE FATAL_ERROR("Should be unreachable"); diff --git a/src/utils/file_utils.c b/src/utils/file_utils.c index 2e62a84df..b10fc0088 100644 --- a/src/utils/file_utils.c +++ b/src/utils/file_utils.c @@ -240,7 +240,7 @@ char *file_read_all(const char *path, size_t *return_size) { error_exit("Failed to read file \"%s\".\n", path); } - assert(bytes_read == file_size); + ASSERT0(bytes_read == file_size); buffer[bytes_read] = '\0'; size_t offset = 0; @@ -527,8 +527,8 @@ extern int _chdrive(int drive); void file_copy_file(const char *src_path, const char *dst_path, bool overwrite) { - assert(src_path); - assert(dst_path); + ASSERT0(src_path); + ASSERT0(dst_path); #if (_MSC_VER) CopyFileW(win_utf8to16(src_path), win_utf8to16(dst_path), !overwrite); #else @@ -539,7 +539,7 @@ void file_copy_file(const char *src_path, const char *dst_path, bool overwrite) bool file_delete_file(const char *path) { - assert(path); + ASSERT0(path); #if (_MSC_VER) return DeleteFileW(win_utf8to16(path)); #else @@ -549,7 +549,7 @@ bool file_delete_file(const char *path) void file_delete_all_files_in_dir_with_suffix(const char *path, const char *suffix) { - assert(path); + ASSERT0(path); #if (_MSC_VER) const char *cmd = "del /q \"%s\\*%s\" >nul 2>&1"; #else diff --git a/src/utils/json.c b/src/utils/json.c index 30669b13a..97b74033a 100644 --- a/src/utils/json.c +++ b/src/utils/json.c @@ -345,7 +345,7 @@ void json_map_set(JSONObject *obj, const char *key, JSONObject *value) JSONObject *json_map_get(JSONObject *obj, const char *key) { - assert(obj->type == J_OBJECT); + ASSERT0(obj->type == J_OBJECT); FOREACH_IDX(i, const char *, a_key, obj->keys) { if (str_eq(a_key, key)) return obj->members[i]; diff --git a/src/utils/lib.h b/src/utils/lib.h index b120c2f58..ae2e56fe3 100644 --- a/src/utils/lib.h +++ b/src/utils/lib.h @@ -253,8 +253,8 @@ typedef struct static inline VHeader_* vec_new_(size_t element_size, size_t capacity) { - assert(capacity < UINT32_MAX); - assert(element_size < UINT32_MAX / 100); + ASSERT0(capacity < UINT32_MAX); + ASSERT0(element_size < UINT32_MAX / 100); VHeader_ *header = CALLOC(element_size * capacity + sizeof(VHeader_)); header->capacity = (uint32_t)capacity; return header; @@ -271,8 +271,8 @@ static inline void vec_resize(void *vec, uint32_t new_size) static inline void vec_pop(void *vec) { - assert(vec); - assert(vec_size(vec) > 0); + ASSERT0(vec); + ASSERT0(vec_size(vec) > 0); VHeader_ *header = vec; header[-1].size--; } @@ -280,9 +280,9 @@ static inline void vec_pop(void *vec) static inline void vec_erase_front(void *vec, unsigned to_erase) { if (!to_erase) return; - assert(vec); + ASSERT0(vec); unsigned size = vec_size(vec); - assert(size >= to_erase); + ASSERT0(size >= to_erase); void **vecptr = (void**)vec; for (int i = to_erase; i < size; i++) { @@ -294,9 +294,9 @@ static inline void vec_erase_front(void *vec, unsigned to_erase) static inline void vec_erase_at(void *vec, unsigned i) { - assert(vec); + ASSERT0(vec); unsigned size = vec_size(vec); - assert(size > i); + ASSERT0(size > i); void **vecptr = (void**)vec; for (int j = i + 1; j < size; j++) { @@ -712,3 +712,4 @@ const char *zip_dir_iterator(FILE *zip, ZipDirIterator *iterator); const char *zip_dir_iterator_next(ZipDirIterator *iterator, ZipFile *file); const char *zip_file_read(FILE *zip, ZipFile *file, void **buffer_ptr); const char *zip_file_write(FILE *zip, ZipFile *file, const char *dir, bool overwrite); + diff --git 
a/src/utils/malloc.c b/src/utils/malloc.c index eb3243f2d..9ecd3d2ff 100644 --- a/src/utils/malloc.c +++ b/src/utils/malloc.c @@ -33,7 +33,7 @@ void memory_release() void *calloc_string(size_t len) { - assert(len > 0); + ASSERT0(len > 0); allocations_done++; return vmem_alloc(&char_arena, len); } @@ -42,7 +42,7 @@ void *calloc_string(size_t len) // Simple bump allocator with buckets. void *calloc_arena(size_t mem) { - assert(mem > 0); + ASSERT0(mem > 0); // Round to multiple of 16 mem = (mem + 15U) & ~15ULL; allocations_done++; diff --git a/src/utils/malloc.h b/src/utils/malloc.h index fb04524a0..749441391 100644 --- a/src/utils/malloc.h +++ b/src/utils/malloc.h @@ -10,7 +10,7 @@ extern Vmem name##_arena; \ typedef unsigned type##Id; \ static inline type *name##_calloc(void) { return (type *)vmem_alloc(&name##_arena, sizeof(type)); } \ static inline void name##_arena_free(void) { vmem_free(&name##_arena); } \ -static inline type *name##ptr(type ## Id id) { assert(id); return ((type *)name##_arena.ptr) + id; } \ +static inline type *name##ptr(type ## Id id) { ASSERT0(id); return ((type *)name##_arena.ptr) + id; } \ static inline type *name##ptrzero(type ## Id id) { return id ? ((type *)name##_arena.ptr) + id : NULL; } \ static inline type##Id name##id(type *ptr) { return (unsigned) (ptr - ((type *)name##_arena.ptr)); } \ static inline type *name##_copy(type *ptr) { type *x = name##_calloc(); memcpy(x, ptr, sizeof(type)); return x; } diff --git a/src/utils/stringutils.c b/src/utils/stringutils.c index 7586ed48b..48a9af821 100644 --- a/src/utils/stringutils.c +++ b/src/utils/stringutils.c @@ -68,7 +68,7 @@ static const char *scan_past_underscore(const char *string) const char *str_unescape(char *string) { - assert(string[0] == '"'); + ASSERT0(string[0] == '"'); char c; size_t index = 0; while ((c = string++[0]) != '"') @@ -169,7 +169,7 @@ char *str_vprintf(const char *var, va_list list) } char *buffer = malloc_string((uint32_t)len + 1); int new_len = vsnprintf(buffer, len + 1, var, list); - assert(len == new_len); + ASSERT0(len == new_len); (void)new_len; return buffer; } diff --git a/src/utils/taskqueue.c b/src/utils/taskqueue.c index 512277619..0eefdd1d4 100644 --- a/src/utils/taskqueue.c +++ b/src/utils/taskqueue.c @@ -34,7 +34,7 @@ static void *taskqueue_thread(void *data) void taskqueue_run(int threads, Task **task_list) { - assert(threads > 0); + ASSERT0(threads > 0); pthread_t *pthreads = malloc(sizeof(pthread_t) * (unsigned)threads); TaskQueue queue = { .queue = task_list }; if (pthread_mutex_init(&queue.lock, NULL)) error_exit("Failed to set up mutex"); @@ -82,7 +82,7 @@ static unsigned WINAPI taskqueue_thread(LPVOID lpParam) void taskqueue_run(int threads, Task **task_list) { - assert(threads > 0); + ASSERT0(threads > 0); HANDLE *handles = malloc(sizeof(HANDLE) * (unsigned)threads); TaskQueue queue = { .queue = task_list }; InitializeCriticalSection(&queue.lock); diff --git a/src/utils/unzipper.c b/src/utils/unzipper.c index 536b88112..6ee47e5b9 100644 --- a/src/utils/unzipper.c +++ b/src/utils/unzipper.c @@ -109,7 +109,7 @@ const char *zip_dir_iterator(FILE *zip, ZipDirIterator *iterator) const char *zip_dir_iterator_next(ZipDirIterator *iterator, ZipFile *file) { - assert(iterator->current_file < iterator->files); + ASSERT0(iterator->current_file < iterator->files); iterator->current_file++; FILE *zip = iterator->file; if (fseek(zip, iterator->offset, SEEK_SET)) return "Cannot seek in c3l file!"; @@ -186,7 +186,7 @@ const char *zip_file_read(FILE *zip, ZipFile *file, void 
**buffer_ptr) } // Only deflate supported. - assert(file->compression_method == 8 && "Should already be checked."); + ASSERT0(file->compression_method == 8 && "Should already be checked."); // Deflate - using zlib z_stream strm = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .avail_in = 0, .next_in = Z_NULL }; @@ -263,7 +263,7 @@ const char *zip_file_write(FILE *zip, ZipFile *file, const char *dir, bool overw while (left_to_read) { size_t amount_to_read = left_to_read < ZIP_BUFFER_SIZE ? left_to_read : ZIP_BUFFER_SIZE; - assert(amount_to_read > 0); + ASSERT0(amount_to_read > 0); if (!read_all(zip, internal_buffer, amount_to_read)) { fclose(f); @@ -280,7 +280,7 @@ const char *zip_file_write(FILE *zip, ZipFile *file, const char *dir, bool overw } // Only deflate supported. - assert(file->compression_method == 8 && "Should already be checked."); + ASSERT0(file->compression_method == 8 && "Should already be checked."); // Deflate - using zlib z_stream strm = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .avail_in = 0, .next_in = Z_NULL }; diff --git a/src/utils/vmem.c b/src/utils/vmem.c index 88a905ea6..983fc7a93 100644 --- a/src/utils/vmem.c +++ b/src/utils/vmem.c @@ -71,7 +71,7 @@ static inline void* mmap_allocate(Vmem *vmem, size_t to_allocate) #endif void *ptr = ((uint8_t *)vmem->ptr) + vmem->allocated; vmem->allocated = allocated_after; - assert(vmem->size > vmem->allocated); + ASSERT0(vmem->size > vmem->allocated); return ptr; } diff --git a/src/version.h b/src/version.h index 9c73f4bd7..77f2ee974 100644 --- a/src/version.h +++ b/src/version.h @@ -1,2 +1,2 @@ -#define COMPILER_VERSION "0.6.4" +#define COMPILER_VERSION "0.6.5" #define PRERELEASE 1 \ No newline at end of file diff --git a/test/test_suite/errors/ternary_void_fault.c3t b/test/test_suite/errors/ternary_void_fault.c3t new file mode 100644 index 000000000..df5d82daa --- /dev/null +++ b/test/test_suite/errors/ternary_void_fault.c3t @@ -0,0 +1,71 @@ +// #target: macos-x64 +module test; +fn void! x() {} +fn void main() +{ + int a; + @catch(a > 0 ? 
x() : x()); +} + +/* #expect: test.ll + +define void @test.main() #0 { +entry: + %a = alloca i32, align 4 + %blockret = alloca i64, align 8 + %f = alloca i64, align 8 + store i32 0, ptr %a, align 4 + br label %testblock + +testblock: ; preds = %entry + %0 = load i32, ptr %a, align 4 + %gt = icmp sgt i32 %0, 0 + br i1 %gt, label %cond.lhs, label %cond.rhs + +cond.lhs: ; preds = %testblock + %1 = call i64 @test.x() + %not_err = icmp eq i64 %1, 0 + %2 = call i1 @llvm.expect.i1(i1 %not_err, i1 true) + br i1 %2, label %after_check, label %assign_optional + +assign_optional: ; preds = %cond.lhs + store i64 %1, ptr %f, align 8 + br label %end_block + +after_check: ; preds = %cond.lhs + br label %cond.phi + +cond.rhs: ; preds = %testblock + %3 = call i64 @test.x() + %not_err1 = icmp eq i64 %3, 0 + %4 = call i1 @llvm.expect.i1(i1 %not_err1, i1 true) + br i1 %4, label %after_check3, label %assign_optional2 + +assign_optional2: ; preds = %cond.rhs + store i64 %3, ptr %f, align 8 + br label %end_block + +after_check3: ; preds = %cond.rhs + br label %cond.phi + +cond.phi: ; preds = %after_check3, %after_check + store i64 0, ptr %f, align 8 + br label %end_block + +end_block: ; preds = %cond.phi, %assign_optional2, %assign_optional + %5 = load i64, ptr %f, align 8 + %neq = icmp ne i64 %5, 0 + br i1 %neq, label %if.then, label %if.exit + +if.then: ; preds = %end_block + %6 = load i64, ptr %f, align 8 + store i64 %6, ptr %blockret, align 8 + br label %expr_block.exit + +if.exit: ; preds = %end_block + store i64 0, ptr %blockret, align 8 + br label %expr_block.exit + +expr_block.exit: ; preds = %if.exit, %if.then + ret void +} diff --git a/test/test_suite/functions/splat_init.c3t b/test/test_suite/functions/splat_init.c3t new file mode 100644 index 000000000..13d82a87f --- /dev/null +++ b/test/test_suite/functions/splat_init.c3t @@ -0,0 +1,40 @@ +// #target: macos-x64 +module splat; +import std; + +int a = 0; + +fn int[2] test() +{ + a++; + return { 2, 3 }; +} +fn void main() +{ + int[4] z = { a, ...test(), a }; +} + +/* #expect: splat.ll + +define void @splat.main() #0 { +entry: + %z = alloca [4 x i32], align 16 + %.anon = alloca [2 x i32], align 4 + %result = alloca [2 x i32], align 4 + %0 = load i32, ptr @splat.a, align 4 + store i32 %0, ptr %z, align 4 + %ptradd = getelementptr inbounds i8, ptr %z, i64 4 + %1 = call i64 @splat.test() + store i64 %1, ptr %result, align 4 + call void @llvm.memcpy.p0.p0.i32(ptr align 4 %.anon, ptr align 4 %result, i32 8, i1 false) + %2 = load i32, ptr %.anon, align 4 + store i32 %2, ptr %ptradd, align 4 + %ptradd1 = getelementptr inbounds i8, ptr %z, i64 8 + %ptradd2 = getelementptr inbounds i8, ptr %.anon, i64 4 + %3 = load i32, ptr %ptradd2, align 4 + store i32 %3, ptr %ptradd1, align 4 + %ptradd3 = getelementptr inbounds i8, ptr %z, i64 12 + %4 = load i32, ptr @splat.a, align 4 + store i32 %4, ptr %ptradd3, align 4 + ret void +}