From aac1bdf13ad0cbbb0cf3352b9ff678be7d48b8b6 Mon Sep 17 00:00:00 2001 From: John-John Tedro Date: Sat, 26 Aug 2023 03:07:12 +0200 Subject: [PATCH] Implement HashMap and HashSet using a forked hashbrown::raw (#627) --- .github/workflows/ci.yml | 2 +- crates/rune-cli/Cargo.toml | 2 +- crates/rune-cli/src/main.rs | 2 +- crates/rune-core/Cargo.toml | 2 +- crates/rune-core/src/lib.rs | 2 +- crates/rune-core/src/protocol.rs | 8 + crates/rune-languageserver/Cargo.toml | 2 +- crates/rune-languageserver/src/main.rs | 2 +- crates/rune-macros/Cargo.toml | 2 +- crates/rune-macros/src/any.rs | 475 ++- crates/rune-macros/src/context.rs | 43 + crates/rune-macros/src/function.rs | 73 +- crates/rune-macros/src/inst_display.rs | 5 +- crates/rune-macros/src/internals.rs | 3 + crates/rune-macros/src/lib.rs | 25 +- crates/rune-macros/src/macro_.rs | 5 +- crates/rune-macros/src/spanned.rs | 6 +- crates/rune-modules/Cargo.toml | 2 +- crates/rune-modules/src/lib.rs | 2 +- crates/rune-wasm/Cargo.toml | 2 +- crates/rune-wasm/src/lib.rs | 2 +- crates/rune/Cargo.toml | 7 +- crates/rune/src/compile/compile.rs | 13 +- crates/rune/src/compile/context.rs | 6 +- crates/rune/src/compile/error.rs | 15 + crates/rune/src/compile/ir/compiler.rs | 7 +- crates/rune/src/compile/ir/interpreter.rs | 5 +- crates/rune/src/compile/pool.rs | 5 +- crates/rune/src/compile/v1/assemble.rs | 25 +- crates/rune/src/compile/v1/scopes.rs | 10 +- crates/rune/src/doc.rs | 2 +- crates/rune/src/hashbrown.rs | 5 + crates/rune/src/hashbrown/fork.rs | 33 + crates/rune/src/hashbrown/fork/macros.rs | 70 + crates/rune/src/hashbrown/fork/raw/alloc.rs | 86 + crates/rune/src/hashbrown/fork/raw/bitmask.rs | 133 + crates/rune/src/hashbrown/fork/raw/generic.rs | 157 + crates/rune/src/hashbrown/fork/raw/mod.rs | 3640 +++++++++++++++++ crates/rune/src/hashbrown/fork/raw/neon.rs | 124 + crates/rune/src/hashbrown/fork/raw/sse2.rs | 149 + crates/rune/src/hashbrown/fork/scopeguard.rs | 73 + crates/rune/src/hashbrown/table.rs | 272 ++ crates/rune/src/hir/arena.rs | 4 +- crates/rune/src/hir/lowering.rs | 80 +- crates/rune/src/indexing/index.rs | 52 +- crates/rune/src/internal_macros.rs | 12 +- crates/rune/src/languageserver/completion.rs | 4 +- crates/rune/src/languageserver/state.rs | 18 +- crates/rune/src/lib.rs | 44 +- crates/rune/src/module/module.rs | 22 +- crates/rune/src/modules.rs | 1 + crates/rune/src/modules/any.rs | 2 +- crates/rune/src/modules/bytes.rs | 6 +- crates/rune/src/modules/collections.rs | 6 + .../rune/src/modules/collections/hash_map.rs | 362 +- .../rune/src/modules/collections/hash_set.rs | 354 +- .../rune/src/modules/collections/vec_deque.rs | 2 +- crates/rune/src/modules/f64.rs | 10 +- crates/rune/src/modules/hash.rs | 16 + crates/rune/src/modules/iter.rs | 16 +- crates/rune/src/modules/ops.rs | 68 +- crates/rune/src/modules/string.rs | 10 +- crates/rune/src/modules/tuple.rs | 21 + crates/rune/src/modules/vec.rs | 25 +- crates/rune/src/query/query.rs | 99 +- crates/rune/src/runtime.rs | 9 +- crates/rune/src/runtime/bytes.rs | 17 +- crates/rune/src/runtime/control_flow.rs | 2 +- crates/rune/src/runtime/format.rs | 15 +- crates/rune/src/runtime/from_value.rs | 24 +- crates/rune/src/runtime/function.rs | 2 +- crates/rune/src/runtime/future.rs | 16 +- crates/rune/src/runtime/generator.rs | 41 +- crates/rune/src/runtime/generator_state.rs | 15 +- crates/rune/src/runtime/hasher.rs | 60 + crates/rune/src/runtime/iterator.rs | 26 +- crates/rune/src/runtime/object.rs | 19 +- crates/rune/src/runtime/protocol_caller.rs | 5 +- 
crates/rune/src/runtime/range.rs | 4 +- crates/rune/src/runtime/range_from.rs | 4 +- crates/rune/src/runtime/range_full.rs | 4 +- crates/rune/src/runtime/range_inclusive.rs | 4 +- crates/rune/src/runtime/range_to.rs | 4 +- crates/rune/src/runtime/range_to_inclusive.rs | 4 +- crates/rune/src/runtime/shared.rs | 8 +- crates/rune/src/runtime/static_type.rs | 47 +- crates/rune/src/runtime/stream.rs | 21 +- crates/rune/src/runtime/tuple.rs | 37 +- crates/rune/src/runtime/type_info.rs | 1 + crates/rune/src/runtime/value.rs | 97 +- crates/rune/src/runtime/vec.rs | 38 +- crates/rune/src/runtime/vm_error.rs | 40 +- crates/rune/src/tests.rs | 2 +- crates/rune/src/tests/bug_344.rs | 10 +- crates/rune/src/tests/compiler_general.rs | 4 +- tools/import_hashbrown.ps1 | 8 + 96 files changed, 6356 insertions(+), 970 deletions(-) create mode 100644 crates/rune/src/hashbrown.rs create mode 100644 crates/rune/src/hashbrown/fork.rs create mode 100644 crates/rune/src/hashbrown/fork/macros.rs create mode 100644 crates/rune/src/hashbrown/fork/raw/alloc.rs create mode 100644 crates/rune/src/hashbrown/fork/raw/bitmask.rs create mode 100644 crates/rune/src/hashbrown/fork/raw/generic.rs create mode 100644 crates/rune/src/hashbrown/fork/raw/mod.rs create mode 100644 crates/rune/src/hashbrown/fork/raw/neon.rs create mode 100644 crates/rune/src/hashbrown/fork/raw/sse2.rs create mode 100644 crates/rune/src/hashbrown/fork/scopeguard.rs create mode 100644 crates/rune/src/hashbrown/table.rs create mode 100644 crates/rune/src/modules/hash.rs create mode 100644 crates/rune/src/runtime/hasher.rs create mode 100644 tools/import_hashbrown.ps1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c1ab772eb..2195ca4af 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,7 +58,7 @@ jobs: needs: basics steps: - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@1.67 + - uses: dtolnay/rust-toolchain@1.70 - uses: Swatinem/rust-cache@v2 - run: cargo build --workspace diff --git a/crates/rune-cli/Cargo.toml b/crates/rune-cli/Cargo.toml index 4c4391691..16a65b117 100644 --- a/crates/rune-cli/Cargo.toml +++ b/crates/rune-cli/Cargo.toml @@ -3,7 +3,7 @@ name = "rune-cli" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "An interpreter for the Rune Language, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" diff --git a/crates/rune-cli/src/main.rs b/crates/rune-cli/src/main.rs index 79b28f813..80e136f84 100644 --- a/crates/rune-cli/src/main.rs +++ b/crates/rune-cli/src/main.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //! <br>
//! <br>
//! Visit the site 🌐 diff --git a/crates/rune-core/Cargo.toml b/crates/rune-core/Cargo.toml index d742afba4..2dfc4dd77 100644 --- a/crates/rune-core/Cargo.toml +++ b/crates/rune-core/Cargo.toml @@ -3,7 +3,7 @@ name = "rune-core" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "Core components for the Rune Language, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" diff --git a/crates/rune-core/src/lib.rs b/crates/rune-core/src/lib.rs index f76b7cd9b..ecca8a580 100644 --- a/crates/rune-core/src/lib.rs +++ b/crates/rune-core/src/lib.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //! <br>
//! <br>
//! Visit the site 🌐 diff --git a/crates/rune-core/src/protocol.rs b/crates/rune-core/src/protocol.rs index 186c63dca..11f6afb56 100644 --- a/crates/rune-core/src/protocol.rs +++ b/crates/rune-core/src/protocol.rs @@ -478,4 +478,12 @@ define! { repr: Some("value?"), doc: ["Allows the `?` operator to apply to values of this type."], }; + + /// Protocol used when calculating a hash. + pub const [HASH, HASH_HASH]: Protocol = Protocol { + name: "hash", + hash: 0xf6cf2d9f416cef08u64, + repr: Some("let output = hash($value)"), + doc: ["Hash the given value."], + }; } diff --git a/crates/rune-languageserver/Cargo.toml b/crates/rune-languageserver/Cargo.toml index 57260af02..ed8c38d8e 100644 --- a/crates/rune-languageserver/Cargo.toml +++ b/crates/rune-languageserver/Cargo.toml @@ -3,7 +3,7 @@ name = "rune-languageserver" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "A language server for the Rune Language, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" diff --git a/crates/rune-languageserver/src/main.rs b/crates/rune-languageserver/src/main.rs index 21e53a4db..b71bd5847 100644 --- a/crates/rune-languageserver/src/main.rs +++ b/crates/rune-languageserver/src/main.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //! <br>
//! <br>
//! Visit the site 🌐 diff --git a/crates/rune-macros/Cargo.toml b/crates/rune-macros/Cargo.toml index fd191c837..51c743910 100644 --- a/crates/rune-macros/Cargo.toml +++ b/crates/rune-macros/Cargo.toml @@ -3,7 +3,7 @@ name = "rune-macros" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "Macros for the Rune Language, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" diff --git a/crates/rune-macros/src/any.rs b/crates/rune-macros/src/any.rs index c75a37799..2ee7aa13a 100644 --- a/crates/rune-macros/src/any.rs +++ b/crates/rune-macros/src/any.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use proc_macro2::{Span, TokenStream}; +use proc_macro2::TokenStream; use quote::{quote, quote_spanned, ToTokens}; use rune_core::Hash; use syn::punctuated::Punctuated; @@ -25,18 +25,18 @@ impl syn::parse::Parse for InternalCall { } impl InternalCall { - pub fn expand(self) -> Result> { - let cx = Context::with_crate(); + pub(super) fn into_any_builder(self, cx: &Context) -> Result, ()> { let tokens = cx.tokens_with_module(None); let name = match self.path.segments.last() { Some(last) if last.arguments.is_empty() => last.ident.clone(), _ => { - return Err(vec![syn::Error::new( + cx.error(syn::Error::new( self.path.span(), "expected last component in path to be without parameters, give it an explicit name instead with `, \"Type\"`", - )]) + )); + return Err(()); } }; @@ -47,11 +47,24 @@ impl InternalCall { let type_hash = match crate::hash::build_type_hash(&item) { Ok(type_hash) => type_hash, - Err(error) => return Err(vec![error]), + Err(error) => { + cx.error(error); + return Err(()); + } }; + let attr = TypeAttr::default(); let name = syn::LitStr::new(&name.to_string(), name.span()); - expand_any(None, &self.path, type_hash, &name, &[], &tokens, &generics) + + Ok(TypeBuilder { + attr, + ident: self.path, + type_hash, + name, + installers: Vec::new(), + tokens, + generics, + }) } } @@ -69,29 +82,20 @@ impl syn::parse::Parse for Derive { } impl Derive { - pub(super) fn expand(self) -> Result> { - let cx = Context::new(); - - let Ok(attr) = cx.type_attrs(&self.input.attrs) else { - return Err(cx.errors.into_inner()); - }; + pub(super) fn into_any_builder(self, cx: &Context) -> Result, ()> { + let attr = cx.type_attrs(&self.input.attrs)?; let tokens = cx.tokens_with_module(attr.module.as_ref()); - let generics = &self.input.generics; let mut installers = Vec::new(); - let Ok(()) = expand_install_with(&cx, &self.input, &tokens, &attr, generics, &mut installers) else { - return Err(cx.errors.into_inner()); - }; + expand_install_with(cx, &self.input, &tokens, &attr, &mut installers)?; let name = match &attr.name { Some(name) => name, None => &self.input.ident, }; - let ident = &self.input.ident; - let mut item = match &attr.item { Some(item) => item.clone(), None => syn::Path { @@ -104,20 +108,23 @@ impl Derive { let type_hash = match crate::hash::build_type_hash(&item) { Ok(type_hash) => type_hash, - Err(error) => return Err(vec![error]), + Err(error) => { + cx.error(error); + return Err(()); + } }; let name = syn::LitStr::new(&name.to_string(), name.span()); - expand_any( - attr.builtin, - ident, + Ok(TypeBuilder { + attr, + ident: self.input.ident, type_hash, - &name, - &installers, - &tokens, - generics, - ) + name, + installers, + tokens, + generics: self.input.generics, + }) } } @@ -127,7 +134,6 @@ pub(crate) fn expand_install_with( input: &syn::DeriveInput, 
tokens: &Tokens, attr: &TypeAttr, - generics: &syn::Generics, installers: &mut Vec, ) -> Result<(), ()> { let ident = &input.ident; @@ -137,7 +143,7 @@ pub(crate) fn expand_install_with( expand_struct_install_with(cx, installers, ident, st, tokens, attr)?; } syn::Data::Enum(en) => { - expand_enum_install_with(cx, installers, ident, en, tokens, attr, generics)?; + expand_enum_install_with(cx, installers, ident, en, tokens, attr, &input.generics)?; } syn::Data::Union(..) => { cx.error(syn::Error::new_spanned( @@ -468,185 +474,310 @@ fn expand_enum_install_with( Ok(()) } -/// Expand the necessary implementation details for `Any`. -pub(super) fn expand_any( - builtin: Option, +pub struct TypeBuilder { + attr: TypeAttr, ident: T, type_hash: Hash, - name: &syn::LitStr, - installers: &[TokenStream], - tokens: &Tokens, - generics: &syn::Generics, -) -> Result> + name: syn::LitStr, + installers: Vec, + tokens: Tokens, + generics: syn::Generics, +} + +impl TypeBuilder where - T: Copy + ToTokens, + T: ToTokens, { - let Tokens { - any, - context_error, - hash, - module, - named, - pointer_guard, - raw_into_mut, - raw_into_ref, - raw_str, - shared, - type_info, - any_type_info, - type_of, - maybe_type_of, - full_type_of, - unsafe_to_value, - unsafe_to_ref, - unsafe_to_mut, - value, - vm_result, - install_with, - .. - } = &tokens; - - let (impl_generics, type_generics, where_clause) = generics.split_for_impl(); + /// Expand the necessary implementation details for `Any`. + pub(super) fn expand(self) -> TokenStream { + let TypeBuilder { + attr, + ident, + type_hash, + name, + installers, + tokens, + generics, + } = self; + + let Tokens { + any, + context_error, + hash, + module, + named, + pointer_guard, + raw_into_mut, + raw_into_ref, + raw_str, + shared, + type_info, + any_type_info, + type_of, + maybe_type_of, + full_type_of, + unsafe_to_value, + unsafe_to_ref, + unsafe_to_mut, + value, + vm_result, + install_with, + non_null, + box_, + static_type_mod, + from_value, + raw_ref, + raw_mut, + mut_, + ref_, + vm_try, + .. + } = &tokens; + + let (impl_generics, type_generics, where_clause) = generics.split_for_impl(); + + let generic_names = if attr.static_type.is_some() { + vec![] + } else { + generics.type_params().map(|v| &v.ident).collect::>() + }; - let generic_names = generics.type_params().map(|v| &v.ident).collect::>(); + let impl_named = if !generic_names.is_empty() { + quote! { + #[automatically_derived] + impl #impl_generics #named for #ident #type_generics #where_clause { + const BASE_NAME: #raw_str = #raw_str::from_str(#name); - let impl_named = if !generic_names.is_empty() { - quote! { - #[automatically_derived] - impl #impl_generics #named for #ident #type_generics #where_clause { - const BASE_NAME: #raw_str = #raw_str::from_str(#name); - - fn full_name() -> Box { - [#name, "<", &#(#generic_names::full_name(),)* ">"].join("").into_boxed_str() + fn full_name() -> #box_ { + [#name, "<", &#(#generic_names::full_name(),)* ">"].join("").into_boxed_str() + } } } - } - } else { - quote! { - #[automatically_derived] - impl #impl_generics #named for #ident #type_generics #where_clause { - const BASE_NAME: #raw_str = #raw_str::from_str(#name); + } else { + quote! { + #[automatically_derived] + impl #impl_generics #named for #ident #type_generics #where_clause { + const BASE_NAME: #raw_str = #raw_str::from_str(#name); + } } - } - }; + }; - let install_with = quote! 
{ - #[automatically_derived] - impl #impl_generics #install_with for #ident #type_generics #where_clause { - fn install_with(#[allow(unused)] module: &mut #module) -> core::result::Result<(), #context_error> { - #(#installers)* - Ok(()) + let install_with = quote! { + #[automatically_derived] + impl #impl_generics #install_with for #ident #type_generics #where_clause { + fn install_with(#[allow(unused)] module: &mut #module) -> core::result::Result<(), #context_error> { + #(#installers)* + Ok(()) + } } - } - }; - - let any = if builtin.is_none() { - let type_hash = type_hash.into_inner(); - - let make_hash = if !generic_names.is_empty() { - quote!(#hash::new_with_type_parameters(#type_hash, #hash::parameters([#(<#generic_names as #type_of>::type_hash()),*]))) - } else { - quote!(#hash::new(#type_hash)) }; - let type_parameters = if !generic_names.is_empty() { - quote!(#hash::parameters([#(<#generic_names as #type_of>::type_hash()),*])) - } else { - quote!(#hash::EMPTY) - }; + let impl_type_of = if attr.builtin.is_none() { + let type_parameters = if !generic_names.is_empty() { + quote!(#hash::parameters([#(<#generic_names as #type_of>::type_hash()),*])) + } else { + quote!(#hash::EMPTY) + }; + + Some(quote! { + #[automatically_derived] + impl #impl_generics #type_of for #ident #type_generics #where_clause { + #[inline] + fn type_hash() -> #hash { + ::type_hash() + } - Some(quote! { - #[automatically_derived] - impl #impl_generics #any for #ident #type_generics #where_clause { - fn type_hash() -> #hash { - #make_hash + #[inline] + fn type_parameters() -> #hash { + #type_parameters + } + + #[inline] + fn type_info() -> #type_info { + #type_info::Any(#any_type_info::__private_new( + #raw_str::from_str(core::any::type_name::()), + ::type_hash(), + )) + } } - } - #[automatically_derived] - impl #impl_generics #type_of for #ident #type_generics #where_clause { - #[inline] - fn type_hash() -> #hash { - ::type_hash() + #[automatically_derived] + impl #impl_generics #maybe_type_of for #ident #type_generics #where_clause { + #[inline] + fn maybe_type_of() -> Option<#full_type_of> { + Some(::type_of()) + } } + }) + } else if let Some(ty) = attr.static_type { + Some(quote! { + #[automatically_derived] + impl #impl_generics #type_of for #ident #type_generics #where_clause { + #[inline] + fn type_hash() -> #hash { + #static_type_mod::#ty.hash + } - #[inline] - fn type_parameters() -> #hash { - #type_parameters + #[inline] + fn type_info() -> #type_info { + #type_info::StaticType(#static_type_mod::#ty) + } } - #[inline] - fn type_info() -> #type_info { - #type_info::Any(#any_type_info::__private_new( - #raw_str::from_str(core::any::type_name::()), - ::type_hash(), - )) + #[automatically_derived] + impl #impl_generics #maybe_type_of for #ident #type_generics #where_clause { + #[inline] + fn maybe_type_of() -> Option<#full_type_of> { + Some(::type_of()) + } } - } + }) + } else { + None + }; - #[automatically_derived] - impl #impl_generics #maybe_type_of for #ident #type_generics #where_clause { - #[inline] - fn maybe_type_of() -> Option<#full_type_of> { - Some(::type_of()) + let any = if attr.builtin.is_none() { + let type_hash = type_hash.into_inner(); + + let make_hash = if !generic_names.is_empty() { + quote!(#hash::new_with_type_parameters(#type_hash, #hash::parameters([#(<#generic_names as #type_of>::type_hash()),*]))) + } else { + quote!(#hash::new(#type_hash)) + }; + + Some(quote! 
{ + #[automatically_derived] + impl #impl_generics #any for #ident #type_generics #where_clause { + fn type_hash() -> #hash { + #make_hash + } } - } - #[automatically_derived] - impl #impl_generics #unsafe_to_ref for #ident #type_generics #where_clause { - type Guard = #raw_into_ref; + #[automatically_derived] + impl #impl_generics #unsafe_to_ref for #ident #type_generics #where_clause { + type Guard = #raw_into_ref; - unsafe fn unsafe_to_ref<'a>(value: #value) -> #vm_result<(&'a Self, Self::Guard)> { - let (value, guard) = match value.into_any_ptr() { - #vm_result::Ok(value) => value, - #vm_result::Err(err) => return #vm_result::Err(err), - }; + unsafe fn unsafe_to_ref<'a>(value: #value) -> #vm_result<(&'a Self, Self::Guard)> { + let (value, guard) = match value.into_any_ptr() { + #vm_result::Ok(value) => value, + #vm_result::Err(err) => return #vm_result::Err(err), + }; - #vm_result::Ok((&*value, guard)) + #vm_result::Ok((#non_null::as_ref(&value), guard)) + } } - } - #[automatically_derived] - impl #impl_generics #unsafe_to_mut for #ident #type_generics #where_clause { - type Guard = #raw_into_mut; + #[automatically_derived] + impl #impl_generics #unsafe_to_mut for #ident #type_generics #where_clause { + type Guard = #raw_into_mut; - unsafe fn unsafe_to_mut<'a>(value: #value) -> #vm_result<(&'a mut Self, Self::Guard)> { - let (value, guard) = match value.into_any_mut() { - #vm_result::Ok(value) => value, - #vm_result::Err(err) => return #vm_result::Err(err), - }; + unsafe fn unsafe_to_mut<'a>(value: #value) -> #vm_result<(&'a mut Self, Self::Guard)> { + let (mut value, guard) = match value.into_any_mut() { + #vm_result::Ok(value) => value, + #vm_result::Err(err) => return #vm_result::Err(err), + }; - #vm_result::Ok((&mut *value, guard)) + #vm_result::Ok((#non_null::as_mut(&mut value), guard)) + } } - } - #[automatically_derived] - impl #impl_generics #unsafe_to_value for &#ident #type_generics #where_clause { - type Guard = #pointer_guard; + #[automatically_derived] + impl #impl_generics #unsafe_to_value for &#ident #type_generics #where_clause { + type Guard = #pointer_guard; - unsafe fn unsafe_to_value(self) -> #vm_result<(#value, Self::Guard)> { - let (shared, guard) = #shared::from_ref(self); - #vm_result::Ok((#value::from(shared), guard)) + unsafe fn unsafe_to_value(self) -> #vm_result<(#value, Self::Guard)> { + let (shared, guard) = #shared::from_ref(self); + #vm_result::Ok((#value::from(shared), guard)) + } } - } - #[automatically_derived] - impl #impl_generics #unsafe_to_value for &mut #ident #type_generics #where_clause { - type Guard = #pointer_guard; + #[automatically_derived] + impl #impl_generics #unsafe_to_value for &mut #ident #type_generics #where_clause { + type Guard = #pointer_guard; - unsafe fn unsafe_to_value(self) -> #vm_result<(#value, Self::Guard)> { - let (shared, guard) = #shared::from_mut(self); - #vm_result::Ok((#value::from(shared), guard)) + unsafe fn unsafe_to_value(self) -> #vm_result<(#value, Self::Guard)> { + let (shared, guard) = #shared::from_mut(self); + #vm_result::Ok((#value::from(shared), guard)) + } } + }) + } else { + None + }; + + let impl_from_value = 'out: { + if let Some(path) = attr.from_value { + let ty = match &attr.from_value_params { + Some(params) => quote!(#ident<#params>), + None if generics.params.is_empty() => quote!(#ident), + _ => break 'out None, + }; + + Some(quote! 
{ + impl #from_value for #ty { + fn from_value(value: Value) -> #vm_result { + let value = #vm_try!(#path(value)); + let value = #vm_try!(#shared::take(value)); + #vm_result::Ok(value) + } + } + + impl #unsafe_to_ref for #ty { + type Guard = #raw_ref; + + unsafe fn unsafe_to_ref<'a>(value: #value) -> #vm_result<(&'a Self, Self::Guard)> { + let value = #vm_try!(#path(value)); + let value = #vm_try!(#shared::into_ref(value)); + let (value, guard) = #ref_::into_raw(value); + #vm_result::Ok((value.as_ref(), guard)) + } + } + + impl #unsafe_to_mut for #ty { + type Guard = #raw_mut; + + unsafe fn unsafe_to_mut<'a>(value: #value) -> #vm_result<(&'a mut Self, Self::Guard)> { + let value = #vm_try!(#path(value)); + let value = #vm_try!(#shared::into_mut(value)); + let (mut value, guard) = #mut_::into_raw(value); + #vm_result::Ok((value.as_mut(), guard)) + } + } + + impl #from_value for #shared<#ty> { + #[inline] + fn from_value(value: #value) -> #vm_result { + #path(value) + } + } + + impl #from_value for #ref_<#ty> { + fn from_value(value: Value) -> #vm_result { + let value = #vm_try!(#path(value)); + let value = #vm_try!(#shared::into_ref(value)); + #vm_result::Ok(value) + } + } + + impl #from_value for #mut_<#ty> { + fn from_value(value: Value) -> #vm_result { + let value = #vm_try!(#path(value)); + let value = #vm_try!(#shared::into_mut(value)); + #vm_result::Ok(value) + } + } + }) + } else { + None } - }) - } else { - None - }; + }; - Ok(quote! { - #install_with - #impl_named - #any - }) + quote! { + #install_with + #impl_named + #impl_type_of + #impl_from_value + #any + } + } } diff --git a/crates/rune-macros/src/context.rs b/crates/rune-macros/src/context.rs index 48e502744..82dc5d142 100644 --- a/crates/rune-macros/src/context.rs +++ b/crates/rune-macros/src/context.rs @@ -79,6 +79,12 @@ pub(crate) struct TypeAttr { /// Indicates that this is a builtin type, so don't generate an `Any` /// implementation for it. pub(crate) builtin: Option, + /// Indicate a static type to use. + pub(crate) static_type: Option, + /// Method to use to convert from value. + pub(crate) from_value: Option, + /// Method to use to convert from value. + pub(crate) from_value_params: Option>, } /// Parsed variant attributes. 
@@ -455,6 +461,18 @@ impl Context { attr.constructor = true; } else if meta.path == BUILTIN { attr.builtin = Some(meta.path.span()); + } else if meta.path == STATIC_TYPE { + meta.input.parse::()?; + attr.static_type = Some(meta.input.parse()?); + } else if meta.path == FROM_VALUE { + meta.input.parse::()?; + attr.from_value = Some(meta.input.parse()?); + } else if meta.path == FROM_VALUE_PARAMS { + meta.input.parse::()?; + let content; + syn::bracketed!(content in meta.input); + attr.from_value_params = + Some(syn::punctuated::Punctuated::parse_terminated(&content)?); } else { return Err(syn::Error::new_spanned( &meta.path, @@ -545,6 +563,15 @@ impl Context { Span::call_site(), ))); + let mut alloc = syn::Path { + leading_colon: Some(::default()), + segments: Punctuated::default(), + }; + alloc.segments.push(syn::PathSegment::from(syn::Ident::new( + "alloc", + Span::call_site(), + ))); + let mut default_module; let m = match module { @@ -570,6 +597,11 @@ impl Context { compile_error: path(m, ["compile", "Error"]), context_error: path(m, ["compile", "ContextError"]), from_value: path(m, ["runtime", "FromValue"]), + raw_ref: path(m, ["runtime", "RawRef"]), + raw_mut: path(m, ["runtime", "RawMut"]), + ref_: path(m, ["runtime", "Ref"]), + mut_: path(m, ["runtime", "Mut"]), + vm_try: path(m, ["vm_try"]), full_type_of: path(m, ["runtime", "FullTypeOf"]), hash: path(m, ["Hash"]), id: path(m, ["parse", "Id"]), @@ -612,6 +644,9 @@ impl Context { iterator: path(&core, ["iter", "Iterator"]), double_ended_iterator: path(&core, ["iter", "DoubleEndedIterator"]), option: path(&core, ["option", "Option"]), + non_null: path(&core, ["ptr", "NonNull"]), + box_: path(&alloc, ["boxed", "Box"]), + static_type_mod: path(m, ["runtime", "static_type"]), } } } @@ -693,6 +728,14 @@ pub(crate) struct Tokens { pub(crate) iterator: syn::Path, pub(crate) double_ended_iterator: syn::Path, pub(crate) option: syn::Path, + pub(crate) non_null: syn::Path, + pub(crate) box_: syn::Path, + pub(crate) static_type_mod: syn::Path, + pub(crate) raw_ref: syn::Path, + pub(crate) raw_mut: syn::Path, + pub(crate) ref_: syn::Path, + pub(crate) mut_: syn::Path, + pub(crate) vm_try: syn::Path, } impl Tokens { diff --git a/crates/rune-macros/src/function.rs b/crates/rune-macros/src/function.rs index fc53d2ea3..f1a214788 100644 --- a/crates/rune-macros/src/function.rs +++ b/crates/rune-macros/src/function.rs @@ -9,7 +9,6 @@ use syn::{Error, Token}; enum Path { #[default] None, - Instance(syn::Ident, syn::PathSegment), Rename(syn::PathSegment), Protocol(syn::Path), } @@ -17,12 +16,14 @@ enum Path { #[derive(Default)] pub(crate) struct FunctionAttrs { instance: bool, + /// A free function. + free: bool, /// Keep the existing function in place, and generate a separate hidden meta function. keep: bool, /// Path to register in. path: Path, /// Looks like an associated type. 
- self_type: bool, + self_type: Option, } impl FunctionAttrs { @@ -35,6 +36,8 @@ impl FunctionAttrs { if ident == "instance" { out.instance = true; + } else if ident == "free" { + out.free = true; } else if ident == "keep" { out.keep = true; } else if ident == "protocol" { @@ -56,10 +59,6 @@ impl FunctionAttrs { } else if ident == "path" { input.parse::()?; - if input.peek(Token![Self]) { - out.self_type = true; - } - let path = input.parse::()?; if path.segments.len() > 2 { @@ -86,7 +85,10 @@ impl FunctionAttrs { )); }; - out.path = Path::Instance(first.ident, second); + out.self_type = Some(first); + out.path = Path::Rename(second); + } else if first.ident == "Self" { + out.self_type = Some(first); } else { out.path = Path::Rename(first); } @@ -200,37 +202,33 @@ impl Function { (meta_fn, real_fn, sig, true) }; - let real_fn_path = if self.takes_self || attrs.self_type { - let mut path = syn::Path { - leading_colon: None, - segments: Punctuated::default(), - }; - - path.segments.push(syn::PathSegment::from(syn::Ident::new( - "Self", - self.sig.span(), - ))); - path.segments.push(syn::PathSegment::from(real_fn)); + let mut path = syn::Path { + leading_colon: None, + segments: Punctuated::default(), + }; - syn::TypePath { qself: None, path } - } else { - let mut path = syn::Path { - leading_colon: None, - segments: Punctuated::default(), - }; + match (self.takes_self, attrs.free, &attrs.self_type) { + (true, _, _) => { + path.segments + .push(syn::PathSegment::from(::default())); + path.segments.push(syn::PathSegment::from(real_fn)); + } + (_, false, Some(self_type)) => { + path.segments.push(self_type.clone()); + path.segments.push(syn::PathSegment::from(real_fn)); + } + _ => { + path.segments.push(syn::PathSegment::from(real_fn)); + } + } - path.segments.push(syn::PathSegment::from(real_fn)); - syn::TypePath { qself: None, path } - }; + let real_fn_path = syn::TypePath { qself: None, path }; let name_string = syn::LitStr::new(&self.sig.ident.to_string(), self.sig.ident.span()); - let self_type; let mut name; if instance { - self_type = None; - name = 'out: { syn::Expr::Lit(syn::ExprLit { attrs: Vec::new(), @@ -243,21 +241,16 @@ impl Function { }) } Path::None => name_string.clone(), - Path::Rename(last) | Path::Instance(_, last) => { + Path::Rename(last) => { syn::LitStr::new(&last.ident.to_string(), last.ident.span()) } }), }) }; } else { - self_type = match &attrs.path { - Path::Instance(self_type, _) => Some(self_type.clone()), - _ => None, - }; - name = match &attrs.path { Path::None => expr_lit(&self.sig.ident), - Path::Rename(last) | Path::Instance(_, last) => expr_lit(&last.ident), + Path::Rename(last) => expr_lit(&last.ident), Path::Protocol(protocol) => syn::Expr::Path(syn::ExprPath { attrs: Vec::new(), qself: None, @@ -265,7 +258,7 @@ impl Function { }), }; - if !matches!(attrs.path, Path::Instance(..)) { + if attrs.self_type.is_none() { let mut out = syn::ExprArray { attrs: Vec::new(), bracket_token: syn::token::Bracket::default(), @@ -279,7 +272,7 @@ impl Function { let arguments = match &attrs.path { Path::None | Path::Protocol(_) => Punctuated::default(), - Path::Rename(last) | Path::Instance(_, last) => match &last.arguments { + Path::Rename(last) => match &last.arguments { syn::PathArguments::AngleBracketed(arguments) => arguments.args.clone(), syn::PathArguments::None => Punctuated::default(), arguments => { @@ -346,7 +339,7 @@ impl Function { let build_with = if instance { None - } else if let Some(self_type) = self_type { + } else if let Some(self_type) = 
&attrs.self_type { Some(quote!(.build_associated::<#self_type>())) } else { Some(quote!(.build())) diff --git a/crates/rune-macros/src/inst_display.rs b/crates/rune-macros/src/inst_display.rs index 42a14723e..9c593343b 100644 --- a/crates/rune-macros/src/inst_display.rs +++ b/crates/rune-macros/src/inst_display.rs @@ -23,7 +23,10 @@ impl Derive { let mut errors = Vec::new(); let syn::Data::Enum(en) = &self.input.data else { - errors.push(syn::Error::new_spanned(&self.input.ident, "InstDisplay is only supported for enums")); + errors.push(syn::Error::new_spanned( + &self.input.ident, + "InstDisplay is only supported for enums", + )); return Err(errors); }; diff --git a/crates/rune-macros/src/internals.rs b/crates/rune-macros/src/internals.rs index 1b3a2c3e2..8d1771e90 100644 --- a/crates/rune-macros/src/internals.rs +++ b/crates/rune-macros/src/internals.rs @@ -22,6 +22,9 @@ pub const INSTALL_WITH: Symbol = Symbol("install_with"); pub const CONSTRUCTOR: Symbol = Symbol("constructor"); pub const BUILTIN: Symbol = Symbol("builtin"); +pub const STATIC_TYPE: Symbol = Symbol("static_type"); +pub const FROM_VALUE: Symbol = Symbol("from_value"); +pub const FROM_VALUE_PARAMS: Symbol = Symbol("from_value_params"); pub const GET: Symbol = Symbol("get"); pub const SET: Symbol = Symbol("set"); pub const COPY: Symbol = Symbol("copy"); diff --git a/crates/rune-macros/src/lib.rs b/crates/rune-macros/src/lib.rs index 50a2feb02..0d3bda9a7 100644 --- a/crates/rune-macros/src/lib.rs +++ b/crates/rune-macros/src/lib.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //! <br>
//! <br>
//! Visit the site 🌐 @@ -22,6 +22,8 @@ //! //! This is part of the [Rune Language](https://rune-rs.github.io). +#![allow(clippy::manual_map)] + use ::quote::format_ident; use syn::{Generics, Path}; @@ -44,6 +46,8 @@ mod spanned; mod to_tokens; mod to_value; +use self::context::Context; + #[proc_macro] pub fn quote(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = proc_macro2::TokenStream::from(input); @@ -178,7 +182,13 @@ pub fn to_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { #[proc_macro_derive(Any, attributes(rune))] pub fn any(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let derive = syn::parse_macro_input!(input as any::Derive); - derive.expand().unwrap_or_else(to_compile_errors).into() + let cx = Context::new(); + + let Ok(builder) = derive.into_any_builder(&cx) else { + return to_compile_errors(cx.errors.into_inner()).into(); + }; + + builder.expand().into() } /// Calculate a type hash. @@ -209,10 +219,13 @@ pub fn hash(input: proc_macro::TokenStream) -> proc_macro::TokenStream { #[doc(hidden)] pub fn __internal_impl_any(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let internal_call = syn::parse_macro_input!(input as any::InternalCall); - internal_call - .expand() - .unwrap_or_else(to_compile_errors) - .into() + let cx = Context::with_crate(); + + let Ok(builder) = internal_call.into_any_builder(&cx) else { + return to_compile_errors(cx.errors.into_inner()).into(); + }; + + builder.expand().into() } #[proc_macro_attribute] diff --git a/crates/rune-macros/src/macro_.rs b/crates/rune-macros/src/macro_.rs index 83eff340f..7360b19e1 100644 --- a/crates/rune-macros/src/macro_.rs +++ b/crates/rune-macros/src/macro_.rs @@ -144,7 +144,10 @@ impl Macro { Path::Path(_, path) => { for s in &path.segments { let syn::PathArguments::None = s.arguments else { - return Err(syn::Error::new_spanned(s, "Expected simple ident path segment")); + return Err(syn::Error::new_spanned( + s, + "Expected simple ident path segment", + )); }; let ident = syn::LitStr::new(&s.ident.to_string(), s.span()); diff --git a/crates/rune-macros/src/spanned.rs b/crates/rune-macros/src/spanned.rs index e401aa521..2fc51bf3a 100644 --- a/crates/rune-macros/src/spanned.rs +++ b/crates/rune-macros/src/spanned.rs @@ -32,7 +32,11 @@ impl Derive { let inner = match &self.input.data { syn::Data::Struct(st) => { - let Ok(inner) = expander.expand_struct_fields(&st.fields, |member| quote!(&self.#member), is_option_spanned) else { + let Ok(inner) = expander.expand_struct_fields( + &st.fields, + |member| quote!(&self.#member), + is_option_spanned, + ) else { return Err(expander.cx.errors.into_inner()); }; diff --git a/crates/rune-modules/Cargo.toml b/crates/rune-modules/Cargo.toml index bd8b751dd..f99479cc2 100644 --- a/crates/rune-modules/Cargo.toml +++ b/crates/rune-modules/Cargo.toml @@ -3,7 +3,7 @@ name = "rune-modules" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "Native modules for Rune, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" diff --git a/crates/rune-modules/src/lib.rs b/crates/rune-modules/src/lib.rs index 87562995d..986e41803 100644 --- a/crates/rune-modules/src/lib.rs +++ b/crates/rune-modules/src/lib.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //! <br>
//! <br>
//! Visit the site 🌐 diff --git a/crates/rune-wasm/Cargo.toml b/crates/rune-wasm/Cargo.toml index 12c4dfe81..6013aac8c 100644 --- a/crates/rune-wasm/Cargo.toml +++ b/crates/rune-wasm/Cargo.toml @@ -3,7 +3,7 @@ name = "rune-wasm" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "A WASM module for the Rune Language, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" diff --git a/crates/rune-wasm/src/lib.rs b/crates/rune-wasm/src/lib.rs index 23c7b5dad..ebbd54eb4 100644 --- a/crates/rune-wasm/src/lib.rs +++ b/crates/rune-wasm/src/lib.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //! <br>
//! <br>
//! Visit the site 🌐 diff --git a/crates/rune/Cargo.toml b/crates/rune/Cargo.toml index 1d738c990..9d05a3bd8 100644 --- a/crates/rune/Cargo.toml +++ b/crates/rune/Cargo.toml @@ -3,7 +3,7 @@ name = "rune" version = "0.12.3" authors = ["John-John Tedro "] edition = "2021" -rust-version = "1.67" +rust-version = "1.70" description = "The Rune Language, an embeddable dynamic programming language for Rust." documentation = "https://docs.rs/rune" readme = "README.md" @@ -25,7 +25,7 @@ byte-code = ["alloc", "musli-storage"] capture-io = ["alloc", "parking_lot"] disable-io = ["alloc"] fmt = ["alloc"] -std = ["num/std", "serde/std", "rune-core/std", "musli/std", "musli-storage/std", "alloc", "anyhow"] +std = ["num/std", "serde/std", "rune-core/std", "musli/std", "musli-storage/std", "alloc", "anyhow", "lazy_static"] alloc = [] [dependencies] @@ -42,7 +42,7 @@ serde = { version = "1.0.163", default-features = false, features = ["derive", " serde_bytes = { version = "0.11.9", default-features = false, features = ["alloc"] } smallvec = { version = "1.10.0", default-features = false, features = ["serde", "const_new"] } tracing = { version = "0.1.37", default-features = false, features = ["attributes"] } -hashbrown = { version = "0.13.2", features = ["serde"] } +hashbrown = { version = "0.14.0", features = ["serde"] } musli = { version = "0.0.42", default-features = false, features = ["alloc"] } slab = { version = "0.4.8", default-features = false } @@ -74,6 +74,7 @@ similar = { version = "2.2.1", optional = true, features = ["inline", "bytes"] } sha2 = { version = "0.10.6", optional = true } base64 = { version = "0.21.0", optional = true } rand = { version = "0.8.5", optional = true } +lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] tokio = { version = "1.28.1", features = ["full"] } diff --git a/crates/rune/src/compile/compile.rs b/crates/rune/src/compile/compile.rs index 80a598351..34a626bc3 100644 --- a/crates/rune/src/compile/compile.rs +++ b/crates/rune/src/compile/compile.rs @@ -255,7 +255,11 @@ impl<'arena> CompileBuildEntry<'_, 'arena> { c.q.lookup_meta(&location, f.impl_item, GenericsParameters::default())?; let Some(type_hash) = meta.type_hash_of() else { - return Err(compile::Error::expected_meta(&f.ast, meta.info(c.q.pool), "instance function")); + return Err(compile::Error::expected_meta( + &f.ast, + meta.info(c.q.pool), + "instance function", + )); }; let mut cx = hir::lowering::Ctxt::with_query( @@ -405,13 +409,16 @@ impl<'arena> CompileBuildEntry<'_, 'arena> { Build::ReExport => { tracing::trace!("re-export: {}", self.q.pool.item(item_meta.item)); - let Some(import) = self.q.import(&location, item_meta.module, item_meta.item, used)? else { + let Some(import) = + self.q + .import(&location, item_meta.module, item_meta.item, used)? 
+ else { return Err(compile::Error::new( location.span, ErrorKind::MissingItem { item: self.q.pool.item(item_meta.item).to_owned(), }, - )) + )); }; self.q.unit.new_function_reexport( diff --git a/crates/rune/src/compile/context.rs b/crates/rune/src/compile/context.rs index dc87427ba..e18475a61 100644 --- a/crates/rune/src/compile/context.rs +++ b/crates/rune/src/compile/context.rs @@ -7,7 +7,9 @@ use crate::no_std::sync::Arc; use crate::compile::meta; #[cfg(feature = "doc")] use crate::compile::Docs; -use crate::compile::{ComponentRef, ContextError, IntoComponent, Item, ItemBuf, MetaInfo, Names}; +#[cfg(feature = "emit")] +use crate::compile::MetaInfo; +use crate::compile::{ComponentRef, ContextError, IntoComponent, Item, ItemBuf, Names}; use crate::hash; use crate::module::{ Fields, InternalEnum, Module, ModuleAssociated, ModuleAttributeMacro, ModuleConstant, @@ -35,6 +37,7 @@ pub(crate) struct ContextMeta { } impl ContextMeta { + #[cfg(feature = "emit")] pub(crate) fn info(&self) -> MetaInfo { MetaInfo::new(&self.kind, self.hash, self.item.as_deref()) } @@ -136,6 +139,7 @@ impl Context { this.install(crate::modules::any::module()?)?; this.install(crate::modules::bytes::module()?)?; this.install(crate::modules::char::module()?)?; + this.install(crate::modules::hash::module()?)?; this.install(crate::modules::cmp::module()?)?; this.install(crate::modules::collections::module()?)?; this.install(crate::modules::f64::module()?)?; diff --git a/crates/rune/src/compile/error.rs b/crates/rune/src/compile/error.rs index f4561789d..f77d41167 100644 --- a/crates/rune/src/compile/error.rs +++ b/crates/rune/src/compile/error.rs @@ -212,6 +212,7 @@ pub(crate) enum ErrorKind { }, ModAlreadyLoaded { item: ItemBuf, + #[cfg(feature = "emit")] existing: (SourceId, Span), }, MissingMacro { @@ -268,7 +269,9 @@ pub(crate) enum ErrorKind { UnsupportedPatternExpr, UnsupportedBinding, DuplicateObjectKey { + #[cfg(feature = "emit")] existing: Span, + #[cfg(feature = "emit")] object: Span, }, InstanceFunctionOutsideImpl, @@ -279,6 +282,7 @@ pub(crate) enum ErrorKind { ContinueOutsideOfLoop, SelectMultipleDefaults, ExpectedBlockSemiColon { + #[cfg(feature = "emit")] followed_span: Span, }, FnConstAsyncConflict, @@ -339,13 +343,16 @@ pub(crate) enum ErrorKind { name: Box, }, VariableMoved { + #[cfg(feature = "emit")] moved_at: Span, }, UnsupportedGenerics, NestedTest { + #[cfg(feature = "emit")] nested_span: Span, }, NestedBench { + #[cfg(feature = "emit")] nested_span: Span, }, MissingFunctionHash { @@ -356,6 +363,7 @@ pub(crate) enum ErrorKind { }, PatternMissingFields { item: ItemBuf, + #[cfg(feature = "emit")] fields: Box<[Box]>, }, MissingLabelLocation { @@ -412,21 +420,27 @@ pub(crate) enum ErrorKind { BadNumberLiteral, AmbiguousItem { item: ItemBuf, + #[cfg(feature = "emit")] locations: Vec<(Location, ItemBuf)>, }, AmbiguousContextItem { item: ItemBuf, + #[cfg(feature = "emit")] infos: Box<[MetaInfo]>, }, NotVisible { + #[cfg(feature = "emit")] chain: Vec, + #[cfg(feature = "emit")] location: Location, visibility: Visibility, item: ItemBuf, from: ItemBuf, }, NotVisibleMod { + #[cfg(feature = "emit")] chain: Vec, + #[cfg(feature = "emit")] location: Location, visibility: Visibility, item: ItemBuf, @@ -436,6 +450,7 @@ pub(crate) enum ErrorKind { item: ItemBuf, }, ImportCycle { + #[cfg(feature = "emit")] path: Vec, }, ImportRecursionLimit { diff --git a/crates/rune/src/compile/ir/compiler.rs b/crates/rune/src/compile/ir/compiler.rs index 6cab1f86a..741397b10 100644 --- a/crates/rune/src/compile/ir/compiler.rs +++ 
b/crates/rune/src/compile/ir/compiler.rs @@ -44,7 +44,10 @@ pub(crate) fn expr(hir: &hir::Expr<'_>, c: &mut Ctxt<'_, '_>) -> compile::Result } hir::ExprKind::Const(hash) => { let Some(value) = c.q.get_const_value(hash) else { - return Err(compile::Error::msg(hir, format_args!("Missing constant for hash {hash}"))); + return Err(compile::Error::msg( + hir, + format_args!("Missing constant for hash {hash}"), + )); }; ir::Ir::new(span, ir::Value::from_const(value)) @@ -393,7 +396,7 @@ fn expr_if( let Some(cond) = hir.condition else { let ir = block(&hir.block, c)?; default_branch = Some(ir); - continue + continue; }; let cond = condition(cond, c)?; diff --git a/crates/rune/src/compile/ir/interpreter.rs b/crates/rune/src/compile/ir/interpreter.rs index 03db374f2..ae18a5bff 100644 --- a/crates/rune/src/compile/ir/interpreter.rs +++ b/crates/rune/src/compile/ir/interpreter.rs @@ -110,7 +110,10 @@ impl Interpreter<'_, '_> { match &meta.kind { meta::Kind::Const => { let Some(const_value) = self.q.get_const_value(meta.hash) else { - return Err(compile::Error::msg(span, format_args!("Missing constant for hash {}", meta.hash))); + return Err(compile::Error::msg( + span, + format_args!("Missing constant for hash {}", meta.hash), + )); }; return Ok(ir::Value::from_const(const_value)); diff --git a/crates/rune/src/compile/pool.rs b/crates/rune/src/compile/pool.rs index 0a0f200e9..67717392e 100644 --- a/crates/rune/src/compile/pool.rs +++ b/crates/rune/src/compile/pool.rs @@ -4,7 +4,9 @@ use core::fmt; use crate::no_std::collections::HashMap; -use crate::compile::{Item, ItemBuf, Location, Visibility}; +#[cfg(feature = "emit")] +use crate::compile::Location; +use crate::compile::{Item, ItemBuf, Visibility}; use crate::hash::Hash; /// The identifier of a module. @@ -34,6 +36,7 @@ impl fmt::Display for ItemId { #[non_exhaustive] pub(crate) struct ModMeta { /// The location of the module. + #[cfg(feature = "emit")] pub(crate) location: Location, /// The item of the module. pub(crate) item: ItemId, diff --git a/crates/rune/src/compile/v1/assemble.rs b/crates/rune/src/compile/v1/assemble.rs index 721f12621..f71e86a7c 100644 --- a/crates/rune/src/compile/v1/assemble.rs +++ b/crates/rune/src/compile/v1/assemble.rs @@ -431,10 +431,7 @@ fn pat_lit<'hir>( load: &dyn Fn(&mut Ctxt<'_, 'hir, '_>, Needs) -> compile::Result<()>, ) -> compile::Result { let Some(inst) = pat_lit_inst(cx, hir)? 
else { - return Err(compile::Error::new( - hir, - ErrorKind::UnsupportedPatternExpr, - )); + return Err(compile::Error::new(hir, ErrorKind::UnsupportedPatternExpr)); }; load(cx, Needs::Value)?; @@ -1204,10 +1201,7 @@ fn expr_binary<'hir>( }; let Some(target) = supported else { - return Err(compile::Error::new( - span, - ErrorKind::UnsupportedBinaryExpr, - )); + return Err(compile::Error::new(span, ErrorKind::UnsupportedBinaryExpr)); }; let op = match bin_op { @@ -1301,10 +1295,7 @@ fn expr_break<'hir>( _: Needs, ) -> compile::Result> { let Some(current_loop) = cx.loops.last().cloned() else { - return Err(compile::Error::new( - span, - ErrorKind::BreakOutsideOfLoop, - )); + return Err(compile::Error::new(span, ErrorKind::BreakOutsideOfLoop)); }; let (last_loop, to_drop, has_value) = match (hir.label, hir.expr) { @@ -1479,10 +1470,7 @@ fn expr_continue<'hir>( _: Needs, ) -> compile::Result> { let Some(current_loop) = cx.loops.last().cloned() else { - return Err(compile::Error::new( - span, - ErrorKind::ContinueOutsideOfLoop, - )); + return Err(compile::Error::new(span, ErrorKind::ContinueOutsideOfLoop)); }; let last_loop = if let Some(label) = hir.label { @@ -2019,10 +2007,7 @@ fn reorder_field_assignments<'hir>( for a in 0..hir.assignments.len() { loop { let Some(&b) = order.get(a) else { - return Err(compile::Error::msg( - span, - "Order out-of-bounds", - )); + return Err(compile::Error::msg(span, "Order out-of-bounds")); }; if a == b { diff --git a/crates/rune/src/compile/v1/scopes.rs b/crates/rune/src/compile/v1/scopes.rs index 33676d394..ac3a6cc8e 100644 --- a/crates/rune/src/compile/v1/scopes.rs +++ b/crates/rune/src/compile/v1/scopes.rs @@ -143,11 +143,12 @@ impl<'hir> Scopes<'hir> { tracing::trace!(?var, "getting var"); q.visitor.visit_variable_use(self.source_id, var.span, span); - if let Some(moved_at) = var.moved_at { + if let Some(_moved_at) = var.moved_at { return Err(compile::Error::new( span, ErrorKind::VariableMoved { - moved_at: moved_at.span(), + #[cfg(feature = "emit")] + moved_at: _moved_at.span(), }, )); } @@ -177,11 +178,12 @@ impl<'hir> Scopes<'hir> { tracing::trace!(?var, "taking var"); q.visitor.visit_variable_use(self.source_id, var.span, span); - if let Some(moved_at) = var.moved_at { + if let Some(_moved_at) = var.moved_at { return Err(compile::Error::new( span, ErrorKind::VariableMoved { - moved_at: moved_at.span(), + #[cfg(feature = "emit")] + moved_at: _moved_at.span(), }, )); } diff --git a/crates/rune/src/doc.rs b/crates/rune/src/doc.rs index 15ed3135f..b80a62738 100644 --- a/crates/rune/src/doc.rs +++ b/crates/rune/src/doc.rs @@ -1,7 +1,7 @@ //! Helper to generate documentation from a context. mod context; -pub(self) use self::context::Context; +use self::context::Context; mod artifacts; pub(crate) use self::artifacts::{TestParams, Artifacts}; diff --git a/crates/rune/src/hashbrown.rs b/crates/rune/src/hashbrown.rs new file mode 100644 index 000000000..86b52d3f3 --- /dev/null +++ b/crates/rune/src/hashbrown.rs @@ -0,0 +1,5 @@ +mod table; +pub(crate) use self::fork::raw::RawIter; +pub(crate) use self::table::{IterRef, Table}; + +mod fork; diff --git a/crates/rune/src/hashbrown/fork.rs b/crates/rune/src/hashbrown/fork.rs new file mode 100644 index 000000000..2778616b2 --- /dev/null +++ b/crates/rune/src/hashbrown/fork.rs @@ -0,0 +1,33 @@ +#![allow(unused)] +#![allow(clippy::manual_map)] + +// Copied and modified under the MIT license. 
+// Copyright (c) 2016 Amanieu d'Antras +// +// Imported using import_hashbrown.ps1, the below section is the only part +// copied by hand. +// +// After an import of the crate some sections might need to be modified. +// +// See: https://github.com/rust-lang/hashbrown +// The relevant fork: https://github.com/udoprog/hashbrown/tree/raw-infallible-context +// Relevant issue: https://github.com/rust-lang/hashbrown/issues/456 + +#[macro_use] +mod macros; +pub(crate) mod raw; +mod scopeguard; + +/// The error type for `try_reserve` methods. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum TryReserveError { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + + /// The memory allocator returned an error + AllocError { + /// The layout of the allocation request that failed. + layout: alloc::alloc::Layout, + }, +} diff --git a/crates/rune/src/hashbrown/fork/macros.rs b/crates/rune/src/hashbrown/fork/macros.rs new file mode 100644 index 000000000..eaba6bed1 --- /dev/null +++ b/crates/rune/src/hashbrown/fork/macros.rs @@ -0,0 +1,70 @@ +// See the cfg-if crate. +#[allow(unused_macro_rules)] +macro_rules! cfg_if { + // match if/else chains with a final `else` + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + cfg_if! { + @__items + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + }; + + // match if/else chains lacking a final `else` + ( + if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + $( + else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + )* + ) => { + cfg_if! { + @__items + () ; + ( ($($i_met),*) ($($i_it)*) ), + $( ( ($($e_met),*) ($($e_it)*) ), )* + ( () () ), + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the negated cfgs in a list at the beginning and after the + // semicolon is all the remaining items + (@__items ($($not:meta,)*) ; ) => {}; + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { + // Emit all items within one block, applying an appropriate #[cfg]. The + // #[cfg] will require all `$m` matchers specified and must also negate + // all previous matchers. + cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$m` matchers to the list of `$not` matchers as future emissions + // will have to negate everything we just matched as well. + cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } + }; + + // Internal macro to Apply a cfg attribute to a list of items + (@__apply $m:meta, $($it:item)*) => { + $(#[$m] $it)* + }; +} + +// Helper macro for specialization. This also helps avoid parse errors if the +// default fn syntax for specialization changes in the future. +#[cfg(feature = "nightly")] +macro_rules! default_fn { + (#[$($a:tt)*] $($tt:tt)*) => { + #[$($a)*] default $($tt)* + } +} +#[cfg(not(feature = "nightly"))] +macro_rules! default_fn { + ($($tt:tt)*) => { + $($tt)* + } +} diff --git a/crates/rune/src/hashbrown/fork/raw/alloc.rs b/crates/rune/src/hashbrown/fork/raw/alloc.rs new file mode 100644 index 000000000..15299e7b0 --- /dev/null +++ b/crates/rune/src/hashbrown/fork/raw/alloc.rs @@ -0,0 +1,86 @@ +pub(crate) use self::inner::{do_alloc, Allocator, Global}; + +// Nightly-case. +// Use unstable `allocator_api` feature. +// This is compatible with `allocator-api2` which can be enabled or not. +// This is used when building for `std`. 
+#[cfg(feature = "nightly")] +mod inner { + use crate::alloc::alloc::Layout; + pub use crate::alloc::alloc::{Allocator, Global}; + use core::ptr::NonNull; + + #[allow(clippy::map_err_ignore)] + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + match alloc.allocate(layout) { + Ok(ptr) => Ok(ptr.as_non_null_ptr()), + Err(_) => Err(()), + } + } +} + +// Basic non-nightly case. +// This uses `allocator-api2` enabled by default. +// If any crate enables "nightly" in `allocator-api2`, +// this will be equivalent to the nightly case, +// since `allocator_api2::alloc::Allocator` would be re-export of +// `core::alloc::Allocator`. +#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))] +mod inner { + use crate::alloc::alloc::Layout; + pub use allocator_api2::alloc::{Allocator, Global}; + use core::ptr::NonNull; + + #[allow(clippy::map_err_ignore)] + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + match alloc.allocate(layout) { + Ok(ptr) => Ok(ptr.cast()), + Err(_) => Err(()), + } + } +} + +// No-defaults case. +// When building with default-features turned off and +// neither `nightly` nor `allocator-api2` is enabled, +// this will be used. +// Making it impossible to use any custom allocator with collections defined +// in this crate. +// Any crate in build-tree can enable `allocator-api2`, +// or `nightly` without disturbing users that don't want to use it. +#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))] +mod inner { + use crate::alloc::alloc::{alloc, dealloc, Layout}; + use core::ptr::NonNull; + + #[allow(clippy::missing_safety_doc)] // not exposed outside of this crate + pub unsafe trait Allocator { + fn allocate(&self, layout: Layout) -> Result, ()>; + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); + } + + #[derive(Copy, Clone)] + pub struct Global; + + unsafe impl Allocator for Global { + #[inline] + fn allocate(&self, layout: Layout) -> Result, ()> { + unsafe { NonNull::new(alloc(layout)).ok_or(()) } + } + #[inline] + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + dealloc(ptr.as_ptr(), layout); + } + } + + impl Default for Global { + #[inline] + fn default() -> Self { + Global + } + } + + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + alloc.allocate(layout) + } +} diff --git a/crates/rune/src/hashbrown/fork/raw/bitmask.rs b/crates/rune/src/hashbrown/fork/raw/bitmask.rs new file mode 100644 index 000000000..6576b3c5c --- /dev/null +++ b/crates/rune/src/hashbrown/fork/raw/bitmask.rs @@ -0,0 +1,133 @@ +use super::imp::{ + BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE, +}; + +/// A bit mask which contains the result of a `Match` operation on a `Group` and +/// allows iterating through them. +/// +/// The bit mask is arranged so that low-order bits represent lower memory +/// addresses for group match results. +/// +/// For implementation reasons, the bits in the set may be sparsely packed with +/// groups of 8 bits representing one element. If any of these bits are non-zero +/// then this element is considered to true in the mask. If this is the +/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be +/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is +/// similarly a mask of all the actually-used bits. +/// +/// To iterate over a bit mask, it must be converted to a form where only 1 bit +/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the +/// mask bits. 
+#[derive(Copy, Clone)] +pub(crate) struct BitMask(pub(crate) BitMaskWord); + +#[allow(clippy::use_self)] +impl BitMask { + /// Returns a new `BitMask` with all bits inverted. + #[inline] + #[must_use] + #[allow(dead_code)] + pub(crate) fn invert(self) -> Self { + BitMask(self.0 ^ BITMASK_MASK) + } + + /// Returns a new `BitMask` with the lowest bit removed. + #[inline] + #[must_use] + fn remove_lowest_bit(self) -> Self { + BitMask(self.0 & (self.0 - 1)) + } + + /// Returns whether the `BitMask` has at least one set bit. + #[inline] + pub(crate) fn any_bit_set(self) -> bool { + self.0 != 0 + } + + /// Returns the first set bit in the `BitMask`, if there is one. + #[inline] + pub(crate) fn lowest_set_bit(self) -> Option { + if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) { + Some(Self::nonzero_trailing_zeros(nonzero)) + } else { + None + } + } + + /// Returns the number of trailing zeroes in the `BitMask`. + #[inline] + pub(crate) fn trailing_zeros(self) -> usize { + // ARM doesn't have a trailing_zeroes instruction, and instead uses + // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM + // versions (pre-ARMv7) don't have RBIT and need to emulate it + // instead. Since we only have 1 bit set in each byte on ARM, we can + // use swap_bytes (REV) + leading_zeroes instead. + if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE + } else { + self.0.trailing_zeros() as usize / BITMASK_STRIDE + } + } + + /// Same as above but takes a `NonZeroBitMaskWord`. + #[inline] + fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize { + if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + // SAFETY: A byte-swapped non-zero value is still non-zero. + let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) }; + swapped.leading_zeros() as usize / BITMASK_STRIDE + } else { + nonzero.trailing_zeros() as usize / BITMASK_STRIDE + } + } + + /// Returns the number of leading zeroes in the `BitMask`. + #[inline] + pub(crate) fn leading_zeros(self) -> usize { + self.0.leading_zeros() as usize / BITMASK_STRIDE + } +} + +impl IntoIterator for BitMask { + type Item = usize; + type IntoIter = BitMaskIter; + + #[inline] + fn into_iter(self) -> BitMaskIter { + // A BitMask only requires each element (group of bits) to be non-zero. + // However for iteration we need each element to only contain 1 bit. + BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK)) + } +} + +/// Iterator over the contents of a `BitMask`, returning the indices of set +/// bits. +#[derive(Copy, Clone)] +pub(crate) struct BitMaskIter(pub(crate) BitMask); + +impl BitMaskIter { + /// Flip the bit in the mask for the entry at the given index. + /// + /// Returns the bit's previous state. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + #[cfg(feature = "raw")] + pub(crate) unsafe fn flip(&mut self, index: usize) -> bool { + // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. + let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); + self.0 .0 ^= mask; + // The bit was set if the bit is now 0. 
+ self.0 .0 & mask == 0 + } +} + +impl Iterator for BitMaskIter { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + let bit = self.0.lowest_set_bit()?; + self.0 = self.0.remove_lowest_bit(); + Some(bit) + } +} diff --git a/crates/rune/src/hashbrown/fork/raw/generic.rs b/crates/rune/src/hashbrown/fork/raw/generic.rs new file mode 100644 index 000000000..c668b0642 --- /dev/null +++ b/crates/rune/src/hashbrown/fork/raw/generic.rs @@ -0,0 +1,157 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::{mem, ptr}; + +// Use the native word size as the group size. Using a 64-bit group size on +// a 32-bit architecture will just end up being more expensive because +// shifts and multiplies will need to be emulated. + +cfg_if! { + if #[cfg(any( + target_pointer_width = "64", + target_arch = "aarch64", + target_arch = "x86_64", + target_arch = "wasm32", + ))] { + type GroupWord = u64; + type NonZeroGroupWord = core::num::NonZeroU64; + } else { + type GroupWord = u32; + type NonZeroGroupWord = core::num::NonZeroU32; + } +} + +pub(crate) type BitMaskWord = GroupWord; +pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord; +pub(crate) const BITMASK_STRIDE: usize = 8; +// We only care about the highest bit of each byte for the mask. +#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)] +pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; + +/// Helper function to replicate a byte across a `GroupWord`. +#[inline] +fn repeat(byte: u8) -> GroupWord { + GroupWord::from_ne_bytes([byte; Group::WIDTH]) +} + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a word-sized integer. +#[derive(Copy, Clone)] +pub(crate) struct Group(GroupWord); + +// We perform all operations in the native endianness, and convert to +// little-endian just before creating a BitMask. The can potentially +// enable the compiler to eliminate unnecessary byte swaps if we are +// only checking whether a BitMask is empty. +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { + #[repr(C)] + struct AlignedBytes { + _align: [Group; 0], + bytes: [u8; Group::WIDTH], + } + const ALIGNED_BYTES: AlignedBytes = AlignedBytes { + _align: [], + bytes: [EMPTY; Group::WIDTH], + }; + &ALIGNED_BYTES.bytes + } + + /// Loads a group of bytes starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const u8) -> Self { + Group(ptr::read_unaligned(ptr.cast())) + } + + /// Loads a group of bytes starting at the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(ptr::read(ptr.cast())) + } + + /// Stores the group of bytes to the given address, which must be + /// aligned to `mem::align_of::()`. 
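// An illustrative sketch, not part of the patch: the alignment trick behind
// Group::static_empty above. A zero-length array of the aligning type raises the
// struct's alignment without adding any bytes, so the control bytes for the
// never-allocated empty table can live in a plain static. 0xFF is the EMPTY byte.
#[repr(C)]
struct AlignedGroupBytes {
    _align: [u64; 0], // stands in for [Group; 0]; forces group alignment at zero size
    bytes: [u8; 8],   // one group's worth of control bytes on the 64-bit SWAR backend
}
static STATIC_EMPTY: AlignedGroupBytes = AlignedGroupBytes {
    _align: [],
    bytes: [0xFF; 8],
};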
+ #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + ptr::write(ptr.cast(), self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which *may* + /// have the given value. + /// + /// This function may return a false positive in certain cases where + /// the byte in the group differs from the searched value only in its + /// lowest bit. This is fine because: + /// - This never happens for `EMPTY` and `DELETED`, only full entries. + /// - The check for key equality will catch these. + /// - This only happens if there is at least 1 true match. + /// - The chance of this happening is very low (< 1% chance per byte). + #[inline] + pub(crate) fn match_byte(self, byte: u8) -> BitMask { + // This algorithm is derived from + // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + let cmp = self.0 ^ repeat(byte); + BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. + #[inline] + pub(crate) fn match_empty(self) -> BitMask { + // If the high bit is set, then the byte must be either: + // 1111_1111 (EMPTY) or 1000_0000 (DELETED). + // So we can just check if the top two bits are 1 by ANDing them. + BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + // A byte is EMPTY or DELETED iff the high bit is set + BitMask((self.0 & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. + #[inline] + pub(crate) fn match_full(self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let full = 1000_0000 (true) or 0000_0000 (false) + // !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry) + // !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry) + let full = !self.0 & repeat(0x80); + Group(!full + (full >> 7)) + } +} diff --git a/crates/rune/src/hashbrown/fork/raw/mod.rs b/crates/rune/src/hashbrown/fork/raw/mod.rs new file mode 100644 index 000000000..222a42e92 --- /dev/null +++ b/crates/rune/src/hashbrown/fork/raw/mod.rs @@ -0,0 +1,3640 @@ +use crate::alloc::alloc::{handle_alloc_error, Layout}; +use crate::hashbrown::fork::scopeguard::guard; +use crate::hashbrown::fork::TryReserveError; +use core::convert::Infallible; +use core::iter::FusedIterator; +use core::marker::PhantomData; +use core::mem; +use core::mem::ManuallyDrop; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use core::{hint, ptr}; + +cfg_if! { + // Use the SSE2 implementation if possible: it allows us to scan 16 buckets + // at once instead of 8. We don't bother with AVX since it would require + // runtime dispatch and wouldn't gain us much anyways: the probability of + // finding a match drops off drastically after the first few buckets. 
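// An illustrative sketch, not part of the patch: the SWAR comparison behind the generic
// Group::match_byte shown above. XOR with the splatted needle zeroes every matching
// byte, and the classic "has a zero byte" trick then sets the high bit of each match.
fn match_byte_mask(group: u64, byte: u8) -> u64 {
    let repeat = |b: u8| u64::from_ne_bytes([b; 8]);
    let cmp = group ^ repeat(byte);
    // Rare false positives are possible, which is why callers re-check key equality.
    (cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le()
}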
+ // + // I attempted an implementation on ARM using NEON instructions, but it + // turns out that most NEON instructions have multi-cycle latency, which in + // the end outweighs any gains over the generic implementation. + if #[cfg(all( + target_feature = "sse2", + any(target_arch = "x86", target_arch = "x86_64"), + not(miri) + ))] { + mod sse2; + use sse2 as imp; + } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] { + mod neon; + use neon as imp; + } else { + mod generic; + use generic as imp; + } +} + +mod alloc; +pub(crate) use self::alloc::{do_alloc, Allocator, Global}; + +mod bitmask; + +use self::bitmask::BitMaskIter; +use self::imp::Group; + +// Branch prediction hint. This is currently only available on nightly but it +// consistently improves performance by 10-15%. +#[cfg(not(feature = "nightly"))] +use core::convert::identity as likely; +#[cfg(not(feature = "nightly"))] +use core::convert::identity as unlikely; +#[cfg(feature = "nightly")] +use core::intrinsics::{likely, unlikely}; + +// Use strict provenance functions if available. +#[cfg(feature = "nightly")] +use core::ptr::invalid_mut; +// Implement it with a cast otherwise. +#[cfg(not(feature = "nightly"))] +#[inline(always)] +fn invalid_mut(addr: usize) -> *mut T { + addr as *mut T +} + +#[inline] +unsafe fn offset_from(to: *const T, from: *const T) -> usize { + to.offset_from(from) as usize +} + +/// Helper for coercing an infallible result into `Ok`. +#[inline(always)] +fn into_ok(result: Result) -> T { + match result { + Ok(value) => value, + Err(error) => match error {}, + } +} + +#[inline(always)] +fn infallible_eq( + mut f: impl FnMut(&T) -> bool, +) -> impl FnMut(&mut (), &T) -> Result { + move |_, value| Ok::<_, Infallible>(f(value)) +} + +#[inline(always)] +fn infallible_hasher(f: impl Fn(&T) -> u64) -> impl Fn(&mut (), &T) -> Result { + move |_, value| Ok::<_, Infallible>(f(value)) +} + +/// Whether memory allocation errors should return an error or abort. +#[derive(Copy, Clone)] +enum Fallibility { + Fallible, + Infallible, +} + +impl Fallibility { + /// Error to return on capacity overflow. + #[cfg_attr(feature = "inline-more", inline)] + fn capacity_overflow(self) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::CapacityOverflow, + Fallibility::Infallible => panic!("Hash table capacity overflow"), + } + } + + /// Error to return on allocation error. + #[cfg_attr(feature = "inline-more", inline)] + fn alloc_err(self, layout: Layout) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::AllocError { layout }, + Fallibility::Infallible => handle_alloc_error(layout), + } + } +} + +/// Control byte value for an empty bucket. +const EMPTY: u8 = 0b1111_1111; + +/// Control byte value for a deleted bucket. +const DELETED: u8 = 0b1000_0000; + +/// Checks whether a control byte represents a full bucket (top bit is clear). +#[inline] +fn is_full(ctrl: u8) -> bool { + ctrl & 0x80 == 0 +} + +/// Checks whether a control byte represents a special value (top bit is set). +#[inline] +fn is_special(ctrl: u8) -> bool { + ctrl & 0x80 != 0 +} + +/// Checks whether a special control value is EMPTY (just check 1 bit). +#[inline] +fn special_is_empty(ctrl: u8) -> bool { + debug_assert!(is_special(ctrl)); + ctrl & 0x01 != 0 +} + +/// Primary hash function, used to select the initial bucket to probe from. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h1(hash: u64) -> usize { + // On 32-bit platforms we simply ignore the higher hash bits. 
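// An illustrative sketch, not part of the patch: the pattern behind `into_ok`,
// `infallible_eq` and `infallible_hasher` above. Every lookup is implemented once as a
// fallible `*_with` routine carrying a context and an error type; the plain API wraps
// its closures in `Ok(..)` with the uninhabited `Infallible` error, so unwrapping via
// `into_ok` has no runtime cost. The names below are hypothetical stand-ins.
use core::convert::Infallible;

fn lookup<T>(items: &[T], mut eq: impl FnMut(&T) -> bool) -> Option<usize> {
    fn into_ok<U>(result: Result<U, Infallible>) -> U {
        match result {
            Ok(value) => value,
            Err(error) => match error {},
        }
    }
    fn lookup_with<U, E>(
        items: &[U],
        mut eq: impl FnMut(&U) -> Result<bool, E>,
    ) -> Result<Option<usize>, E> {
        for (i, item) in items.iter().enumerate() {
            if eq(item)? {
                return Ok(Some(i));
            }
        }
        Ok(None)
    }
    into_ok(lookup_with(items, |item| Ok::<_, Infallible>(eq(item))))
}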
+ hash as usize +} + +// Constant for h2 function that grabing the top 7 bits of the hash. +const MIN_HASH_LEN: usize = if mem::size_of::() < mem::size_of::() { + mem::size_of::() +} else { + mem::size_of::() +}; + +/// Secondary hash function, saved in the low 7 bits of the control byte. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h2(hash: u64) -> u8 { + // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit + // value, some hash functions (such as FxHash) produce a usize result + // instead, which means that the top 32 bits are 0 on 32-bit platforms. + // So we use MIN_HASH_LEN constant to handle this. + let top7 = hash >> (MIN_HASH_LEN * 8 - 7); + (top7 & 0x7f) as u8 // truncation +} + +/// Probe sequence based on triangular numbers, which is guaranteed (since our +/// table size is a power of two) to visit every group of elements exactly once. +/// +/// A triangular probe has us jump by 1 more group every time. So first we +/// jump by 1 group (meaning we just continue our linear scan), then 2 groups +/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on. +/// +/// Proof that the probe will visit every group in the table: +/// +struct ProbeSeq { + pos: usize, + stride: usize, +} + +impl ProbeSeq { + #[inline] + fn move_next(&mut self, bucket_mask: usize) { + // We should have found an empty bucket by now and ended the probe. + debug_assert!( + self.stride <= bucket_mask, + "Went past end of probe sequence" + ); + + self.stride += Group::WIDTH; + self.pos += self.stride; + self.pos &= bucket_mask; + } +} + +/// Returns the number of buckets needed to hold the given number of items, +/// taking the maximum load factor into account. +/// +/// Returns `None` if an overflow occurs. +// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258 +#[cfg_attr(target_os = "emscripten", inline(never))] +#[cfg_attr(not(target_os = "emscripten"), inline)] +fn capacity_to_buckets(cap: usize) -> Option { + debug_assert_ne!(cap, 0); + + // For small tables we require at least 1 empty bucket so that lookups are + // guaranteed to terminate if an element doesn't exist in the table. + if cap < 8 { + // We don't bother with a table size of 2 buckets since that can only + // hold a single element. Instead we skip directly to a 4 bucket table + // which can hold 3 elements. + return Some(if cap < 4 { 4 } else { 8 }); + } + + // Otherwise require 1/8 buckets to be empty (87.5% load) + // + // Be careful when modifying this, calculate_layout relies on the + // overflow check here. + let adjusted_cap = cap.checked_mul(8)? / 7; + + // Any overflows will have been caught by the checked_mul. Also, any + // rounding errors from the division above will be cleaned up by + // next_power_of_two (which can't overflow because of the previous division). + Some(adjusted_cap.next_power_of_two()) +} + +/// Returns the maximum effective capacity for the given bucket mask, taking +/// the maximum load factor into account. +#[inline] +fn bucket_mask_to_capacity(bucket_mask: usize) -> usize { + if bucket_mask < 8 { + // For tables with 1/2/4/8 buckets, we always reserve one empty slot. + // Keep in mind that the bucket mask is one less than the bucket count. + bucket_mask + } else { + // For larger tables we reserve 12.5% of the slots as empty. 
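// An illustrative sketch, not part of the patch: the triangular probe order described
// above, written at group granularity (the real ProbeSeq works on bucket indices and
// steps `stride` by Group::WIDTH). For a power-of-two group count the offsets
// 0, 1, 3, 6, ... visit every group exactly once, so the result is a permutation.
fn probe_order(num_groups: usize, start: usize) -> Vec<usize> {
    assert!(num_groups.is_power_of_two());
    let mask = num_groups - 1;
    let mut pos = start & mask;
    let mut stride = 0;
    let mut order = Vec::with_capacity(num_groups);
    for _ in 0..num_groups {
        order.push(pos);
        stride += 1;
        pos = (pos + stride) & mask;
    }
    order
}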
+ ((bucket_mask + 1) / 8) * 7 + } +} + +/// Helper which allows the max calculation for ctrl_align to be statically computed for each T +/// while keeping the rest of `calculate_layout_for` independent of `T` +#[derive(Copy, Clone)] +struct TableLayout { + size: usize, + ctrl_align: usize, +} + +impl TableLayout { + #[inline] + const fn new() -> Self { + let layout = Layout::new::(); + Self { + size: layout.size(), + ctrl_align: if layout.align() > Group::WIDTH { + layout.align() + } else { + Group::WIDTH + }, + } + } + + #[inline] + fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> { + debug_assert!(buckets.is_power_of_two()); + + let TableLayout { size, ctrl_align } = self; + // Manual layout calculation since Layout methods are not yet stable. + let ctrl_offset = + size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1); + let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?; + + // We need an additional check to ensure that the allocation doesn't + // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295). + if len > isize::MAX as usize - (ctrl_align - 1) { + return None; + } + + Some(( + unsafe { Layout::from_size_align_unchecked(len, ctrl_align) }, + ctrl_offset, + )) + } +} + +/// A reference to an empty bucket into which an can be inserted. +pub struct InsertSlot { + index: usize, +} + +/// A reference to a hash table bucket containing a `T`. +/// +/// This is usually just a pointer to the element itself. However if the element +/// is a ZST, then we instead track the index of the element in the table so +/// that `erase` works properly. +pub struct Bucket { + // Actually it is pointer to next element than element itself + // this is needed to maintain pointer arithmetic invariants + // keeping direct pointer to element introduces difficulty. + // Using `NonNull` for variance and niche layout + ptr: NonNull, +} + +// This Send impl is needed for rayon support. This is safe since Bucket is +// never exposed in a public API. +unsafe impl Send for Bucket {} + +impl Clone for Bucket { + #[inline] + fn clone(&self) -> Self { + Self { ptr: self.ptr } + } +} + +impl Bucket { + const IS_ZERO_SIZED_TYPE: bool = mem::size_of::() == 0; + + /// Creates a [`Bucket`] that contain pointer to the data. + /// The pointer calculation is performed by calculating the + /// offset from given `base` pointer (convenience for + /// `base.as_ptr().sub(index)`). + /// + /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// If the `T` is a ZST, then we instead track the index of the element + /// in the table so that `erase` works properly (return + /// `NonNull::new_unchecked((index + 1) as *mut T)`) + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety + /// rules of [`NonNull::new_unchecked`] function. + /// + /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method + /// and [`NonNull::new_unchecked`] function, as well as for the correct + /// logic of the work of this crate, the following rules are necessary and + /// sufficient: + /// + /// * the `base` pointer must not be `dangling` and must points to the + /// end of the first `value element` from the `data part` of the table, i.e. 
+ /// must be the pointer that returned by [`RawTable::data_end`] or by + /// [`RawTableInner::data_end`]; + /// + /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` + /// must be no greater than the number returned by the function + /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the + /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` + /// must be no greater than the number returned by the function + /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// [`Bucket`]: crate::hashbrown::fork::raw::Bucket + /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 + /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked + /// [`RawTable::data_end`]: crate::hashbrown::fork::raw::RawTable::data_end + /// [`RawTableInner::data_end`]: RawTableInner::data_end + /// [`RawTable::buckets`]: crate::hashbrown::fork::raw::RawTable::buckets + /// [`RawTableInner::buckets`]: RawTableInner::buckets + #[inline] + unsafe fn from_base_index(base: NonNull, index: usize) -> Self { + // If mem::size_of::() != 0 then return a pointer to an `element` in + // the data part of the table (we start counting from "0", so that + // in the expression T[last], the "last" index actually one less than the + // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"): + // + // `from_base_index(base, 1).as_ptr()` returns a pointer that + // points here in the data part of the table + // (to the start of T1) + // | + // | `base: NonNull` must point here + // | (to the end of T0 or to the start of C0) + // v v + // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast + // ^ + // `from_base_index(base, 1)` returns a pointer + // that points here in the data part of the table + // (to the end of T1) + // + // where: T0...Tlast - our stored data; C0...Clast - control bytes + // or metadata for data. + let ptr = if Self::IS_ZERO_SIZED_TYPE { + // won't overflow because index must be less than length (bucket_mask) + // and bucket_mask is guaranteed to be less than `isize::MAX` + // (see TableLayout::calculate_layout_for method) + invalid_mut(index + 1) + } else { + base.as_ptr().sub(index) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + + /// Calculates the index of a [`Bucket`] as distance between two pointers + /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`). + /// The returned value is in units of T: the distance in bytes divided by + /// [`core::mem::size_of::()`]. + /// + /// If the `T` is a ZST, then we return the index of the element in + /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`). + /// + /// This function is the inverse of [`from_base_index`]. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`. 
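// An illustrative sketch, not part of the patch: the reversed layout the diagram above
// describes. Elements are stored below the control bytes and counted backwards from
// `data_end`, so `from_base_index` keeps `data_end - index` (one past the element) and
// the element itself lives one `T` lower, which is what `as_ptr` later returns.
unsafe fn element_ptr<T>(data_end: *mut T, index: usize) -> *mut T {
    // Assumed for the sketch: `data_end` and `index` belong to the same table and
    // `index` is within bounds, as the safety section above requires.
    let one_past_element = data_end.sub(index); // what Bucket::from_base_index stores
    one_past_element.sub(1) // what Bucket::as_ptr hands out for non-ZST `T`
}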
+ /// + /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`] + /// method, as well as for the correct logic of the work of this crate, the + /// following rules are necessary and sufficient: + /// + /// * `base` contained pointer must not be `dangling` and must point to the + /// end of the first `element` from the `data part` of the table, i.e. + /// must be a pointer that returns by [`RawTable::data_end`] or by + /// [`RawTableInner::data_end`]; + /// + /// * `self` also must not contain dangling pointer; + /// + /// * both `self` and `base` must be created from the same [`RawTable`] + /// (or [`RawTableInner`]). + /// + /// If `mem::size_of::() == 0`, this function is always safe. + /// + /// [`Bucket`]: crate::hashbrown::fork::raw::Bucket + /// [`from_base_index`]: crate::hashbrown::fork::raw::Bucket::from_base_index + /// [`RawTable::data_end`]: crate::hashbrown::fork::raw::RawTable::data_end + /// [`RawTableInner::data_end`]: RawTableInner::data_end + /// [`RawTable`]: crate::hashbrown::fork::raw::RawTable + /// [`RawTableInner`]: RawTableInner + /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from + #[inline] + unsafe fn to_base_index(&self, base: NonNull) -> usize { + // If mem::size_of::() != 0 then return an index under which we used to store the + // `element` in the data part of the table (we start counting from "0", so + // that in the expression T[last], the "last" index actually is one less than the + // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"). + // For example for 5th element in table calculation is performed like this: + // + // mem::size_of::() + // | + // | `self = from_base_index(base, 5)` that returns pointer + // | that points here in tha data part of the table + // | (to the end of T5) + // | | `base: NonNull` must point here + // v | (to the end of T0 or to the start of C0) + // /???\ v v + // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast + // \__________ __________/ + // \/ + // `bucket.to_base_index(base)` = 5 + // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::() + // + // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data. + if Self::IS_ZERO_SIZED_TYPE { + // this can not be UB + self.ptr.as_ptr() as usize - 1 + } else { + offset_from(base.as_ptr(), self.ptr.as_ptr()) + } + } + + /// Acquires the underlying raw pointer `*mut T` to `data`. + /// + /// # Note + /// + /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the + /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because + /// for properly dropping the data we also need to clear `data` control bytes. If we + /// drop data, but do not clear `data control byte` it leads to double drop when + /// [`RawTable`] goes out of scope. + /// + /// If you modify an already initialized `value`, so [`Hash`] and [`Eq`] on the new + /// `T` value and its borrowed form *must* match those for the old `T` value, as the map + /// will not re-evaluate where the new value should go, meaning the value may become + /// "lost" if their location does not reflect their state. 
+ /// + /// [`RawTable`]: crate::hashbrown::fork::raw::RawTable + /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "raw")] + /// # fn test() { + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::raw::{Bucket, RawTable}; + /// + /// type NewHashBuilder = core::hash::BuildHasherDefault; + /// + /// fn make_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let hash_builder = NewHashBuilder::default(); + /// let mut table = RawTable::new(); + /// + /// let value = ("a", 100); + /// let hash = make_hash(&hash_builder, &value.0); + /// + /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); + /// + /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap(); + /// + /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100)); + /// # } + /// # fn main() { + /// # #[cfg(feature = "raw")] + /// # test() + /// # } + /// ``` + #[inline] + pub fn as_ptr(&self) -> *mut T { + if Self::IS_ZERO_SIZED_TYPE { + // Just return an arbitrary ZST pointer which is properly aligned + // invalid pointer is good enough for ZST + invalid_mut(mem::align_of::()) + } else { + unsafe { self.ptr.as_ptr().sub(1) } + } + } + + /// Create a new [`Bucket`] that is offset from the `self` by the given + /// `offset`. The pointer calculation is performed by calculating the + /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`). + /// This function is used for iterators. + /// + /// `offset` is in units of `T`; e.g., a `offset` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety + /// rules of [`NonNull::new_unchecked`] function. + /// + /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method + /// and [`NonNull::new_unchecked`] function, as well as for the correct + /// logic of the work of this crate, the following rules are necessary and + /// sufficient: + /// + /// * `self` contained pointer must not be `dangling`; + /// + /// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`, + /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other + /// words, `self.to_base_index() + ofset + 1` must be no greater than the number returned + /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the + /// `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`, + /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other words, + /// `self.to_base_index() + ofset + 1` must be no greater than the number returned by the + /// function [`RawTable::buckets`] or [`RawTableInner::buckets`]. 
+ /// + /// [`Bucket`]: crate::hashbrown::fork::raw::Bucket + /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 + /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked + /// [`RawTable::buckets`]: crate::hashbrown::fork::raw::RawTable::buckets + /// [`RawTableInner::buckets`]: RawTableInner::buckets + #[inline] + unsafe fn next_n(&self, offset: usize) -> Self { + let ptr = if Self::IS_ZERO_SIZED_TYPE { + // invalid pointer is good enough for ZST + invalid_mut(self.ptr.as_ptr() as usize + offset) + } else { + self.ptr.as_ptr().sub(offset) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + + /// Executes the destructor (if any) of the pointed-to `data`. + /// + /// # Safety + /// + /// See [`ptr::drop_in_place`] for safety concerns. + /// + /// You should use [`RawTable::erase`] instead of this function, + /// or be careful with calling this function directly, because for + /// properly dropping the data we need also clear `data` control bytes. + /// If we drop data, but do not erase `data control byte` it leads to + /// double drop when [`RawTable`] goes out of scope. + /// + /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html + /// [`RawTable`]: crate::hashbrown::fork::raw::RawTable + /// [`RawTable::erase`]: crate::hashbrown::fork::raw::RawTable::erase + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) unsafe fn drop(&self) { + self.as_ptr().drop_in_place(); + } + + /// Reads the `value` from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// # Safety + /// + /// See [`ptr::read`] for safety concerns. + /// + /// You should use [`RawTable::remove`] instead of this function, + /// or be careful with calling this function directly, because compiler + /// calls its destructor when readed `value` goes out of scope. It + /// can cause double dropping when [`RawTable`] goes out of scope, + /// because of not erased `data control byte`. + /// + /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html + /// [`RawTable`]: crate::hashbrown::fork::raw::RawTable + /// [`RawTable::remove`]: crate::hashbrown::fork::raw::RawTable::remove + #[inline] + pub(crate) unsafe fn read(&self) -> T { + self.as_ptr().read() + } + + /// Overwrites a memory location with the given `value` without reading + /// or dropping the old value (like [`ptr::write`] function). + /// + /// # Safety + /// + /// See [`ptr::write`] for safety concerns. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. + /// + /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + #[inline] + pub(crate) unsafe fn write(&self, val: T) { + self.as_ptr().write(val); + } + + /// Returns a shared immutable reference to the `value`. + /// + /// # Safety + /// + /// See [`NonNull::as_ref`] for safety concerns. 
+ /// + /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "raw")] + /// # fn test() { + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::raw::{Bucket, RawTable}; + /// + /// type NewHashBuilder = core::hash::BuildHasherDefault; + /// + /// fn make_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let hash_builder = NewHashBuilder::default(); + /// let mut table = RawTable::new(); + /// + /// let value: (&str, String) = ("A pony", "is a small horse".to_owned()); + /// let hash = make_hash(&hash_builder, &value.0); + /// + /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); + /// + /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap(); + /// + /// assert_eq!( + /// unsafe { bucket.as_ref() }, + /// &("A pony", "is a small horse".to_owned()) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "raw")] + /// # test() + /// # } + /// ``` + #[inline] + pub unsafe fn as_ref<'a>(&self) -> &'a T { + &*self.as_ptr() + } + + /// Returns a unique mutable reference to the `value`. + /// + /// # Safety + /// + /// See [`NonNull::as_mut`] for safety concerns. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. + /// + /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "raw")] + /// # fn test() { + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::raw::{Bucket, RawTable}; + /// + /// type NewHashBuilder = core::hash::BuildHasherDefault; + /// + /// fn make_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let hash_builder = NewHashBuilder::default(); + /// let mut table = RawTable::new(); + /// + /// let value: (&str, String) = ("A pony", "is a small horse".to_owned()); + /// let hash = make_hash(&hash_builder, &value.0); + /// + /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); + /// + /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap(); + /// + /// unsafe { + /// bucket + /// .as_mut() + /// .1 + /// .push_str(" less than 147 cm at the withers") + /// }; + /// assert_eq!( + /// unsafe { bucket.as_ref() }, + /// &( + /// "A pony", + /// "is a small horse less than 147 cm at the withers".to_owned() + /// ) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "raw")] + /// # test() + /// # } + /// ``` + #[inline] + pub unsafe fn as_mut<'a>(&self) -> &'a mut T { + &mut *self.as_ptr() + } + + /// Copies `size_of` bytes from `other` to `self`. The source + /// and destination may *not* overlap. + /// + /// # Safety + /// + /// See [`ptr::copy_nonoverlapping`] for safety concerns. 
+ /// + /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of + /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values + /// in the region beginning at `*self` and the region beginning at `*other` can + /// [violate memory safety]. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. + /// + /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html + /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html + /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + #[cfg(feature = "raw")] + #[inline] + pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) { + self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1); + } +} + +/// A raw hash table with an unsafe API. +pub struct RawTable { + table: RawTableInner, + // Tell dropck that we own instances of T. + marker: PhantomData, +} + +/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless +/// of how many different key-value types are used. +struct RawTableInner { + // Mask to get an index from a hash value. The value is one less than the + // number of buckets in the table. + bucket_mask: usize, + + // [Padding], T1, T2, ..., Tlast, C1, C2, ... + // ^ points here + ctrl: NonNull, + + // Number of elements that can be inserted before we need to grow the table + growth_left: usize, + + // Number of elements in the table, only really used by len() + items: usize, + + alloc: A, +} + +impl RawTable { + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + pub const fn new() -> Self { + Self { + table: RawTableInner::new_in(Global), + marker: PhantomData, + } + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity(capacity: usize) -> Result { + Self::try_with_capacity_in(capacity, Global) + } + + /// Allocates a new hash table with at least enough capacity for inserting + /// the given number of elements without reallocating. + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_in(capacity, Global) + } +} + +impl RawTable { + const TABLE_LAYOUT: TableLayout = TableLayout::new::(); + const DATA_NEEDS_DROP: bool = mem::needs_drop::(); + + /// Creates a new empty hash table without allocating any memory, using the + /// given allocator. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. 
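// An illustrative sketch, not part of the patch: the single-allocation layout the
// `ctrl` field comment above refers to, mirroring TableLayout::calculate_layout_for.
// The element array comes first, the control bytes follow at an offset rounded up to
// max(align_of::<T>(), Group::WIDTH), and Group::WIDTH extra control bytes are
// appended so that group loads near the end of the table can wrap around.
fn table_layout(
    elem_size: usize,
    elem_align: usize,
    buckets: usize,
    group_width: usize,
) -> Option<(usize, usize)> {
    let ctrl_align = elem_align.max(group_width);
    let ctrl_offset =
        elem_size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
    let total = ctrl_offset.checked_add(buckets + group_width)?;
    Some((total, ctrl_offset)) // (allocation size in bytes, offset of the control bytes)
}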
+ #[inline] + pub const fn new_in(alloc: A) -> Self { + Self { + table: RawTableInner::new_in(alloc), + marker: PhantomData, + } + } + + /// Allocates a new hash table with the given number of buckets. + /// + /// The control bytes are left uninitialized. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + alloc: A, + buckets: usize, + fallibility: Fallibility, + ) -> Result { + debug_assert!(buckets.is_power_of_two()); + + Ok(Self { + table: RawTableInner::new_uninitialized( + alloc, + Self::TABLE_LAYOUT, + buckets, + fallibility, + )?, + marker: PhantomData, + }) + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. + fn fallible_with_capacity( + alloc: A, + capacity: usize, + fallibility: Fallibility, + ) -> Result { + Ok(Self { + table: RawTableInner::fallible_with_capacity( + alloc, + Self::TABLE_LAYOUT, + capacity, + fallibility, + )?, + marker: PhantomData, + }) + } + + /// Attempts to allocate a new hash table using the given allocator, with at least enough + /// capacity for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) + } + + /// Allocates a new hash table using the given allocator, with at least enough capacity for + /// inserting the given number of elements without reallocating. + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { + Ok(capacity) => capacity, + Err(_) => unsafe { hint::unreachable_unchecked() }, + } + } + + /// Returns a reference to the underlying allocator. + #[inline] + pub fn allocator(&self) -> &A { + &self.table.alloc + } + + /// Deallocates the table without dropping any entries. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn free_buckets(&mut self) { + self.table.free_buckets(Self::TABLE_LAYOUT); + } + + /// Returns pointer to one past last element of data table. + #[inline] + pub unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) + } + + /// Returns pointer to start of data table. + #[inline] + #[cfg(any(feature = "raw", feature = "nightly"))] + pub unsafe fn data_start(&self) -> NonNull { + NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets())) + } + + /// Return the information about memory allocated by the table. + /// + /// `RawTable` allocates single memory block to store both data and metadata. + /// This function returns allocation size and alignment and the beginning of the area. + /// These are the arguments which will be passed to `dealloc` when the table is dropped. + /// + /// This function might be useful for memory profiling. + #[inline] + #[cfg(feature = "raw")] + pub fn allocation_info(&self) -> (NonNull, Layout) { + self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) + } + + /// Returns the index of a bucket from a `Bucket`. + #[inline] + pub unsafe fn bucket_index(&self, bucket: &Bucket) -> usize { + bucket.to_base_index(self.data_end()) + } + + /// Returns a pointer to an element in the table. 
+ #[inline] + pub unsafe fn bucket(&self, index: usize) -> Bucket { + debug_assert_ne!(self.table.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + /// Erases an element from the table without dropping it. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn erase_no_drop(&mut self, item: &Bucket) { + let index = self.bucket_index(item); + self.table.erase(index); + } + + /// Erases an element from the table, dropping it in place. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + pub unsafe fn erase(&mut self, item: Bucket) { + // Erase the element from the table first since drop might panic. + self.erase_no_drop(&item); + item.drop(); + } + + /// Finds and erases an element from the table, dropping it in place. + /// Returns true if an element was found. + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool { + into_ok(self.erase_entry_with(&mut (), hash, infallible_eq(eq))) + } + + /// Finds and erases an element from the table, dropping it in place. + /// Returns true if an element was found. + /// + /// This variant supports a fallible hasher with a passed around context. + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn erase_entry_with( + &mut self, + cx: &mut C, + hash: u64, + eq: impl FnMut(&mut C, &T) -> Result, + ) -> Result { + // Avoid `Option::map` because it bloats LLVM IR. + if let Some(bucket) = self.find_with(cx, hash, eq)? { + unsafe { + self.erase(bucket); + } + Ok(true) + } else { + Ok(false) + } + } + + /// Removes an element from the table, returning it. + /// + /// This also returns an `InsertSlot` pointing to the newly free bucket. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + pub unsafe fn remove(&mut self, item: Bucket) -> (T, InsertSlot) { + self.erase_no_drop(&item); + ( + item.read(), + InsertSlot { + index: self.bucket_index(&item), + }, + ) + } + + /// Finds and removes an element from the table, returning it. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { + into_ok(self.remove_entry_with(&mut (), hash, infallible_eq(eq))) + } + + /// Finds and removes an element from the table, returning it. + /// + /// This variant supports a fallible hasher with a passed around context. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry_with( + &mut self, + cx: &mut C, + hash: u64, + eq: impl FnMut(&mut C, &T) -> Result, + ) -> Result, E> { + // Avoid `Option::map` because it bloats LLVM IR. + Ok(match self.find_with(cx, hash, eq)? { + Some(bucket) => Some(unsafe { self.remove(bucket).0 }), + None => None, + }) + } + + /// Marks all table buckets as empty without dropping their contents. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear_no_drop(&mut self) { + self.table.clear_no_drop(); + } + + /// Removes all elements from the table without freeing the backing memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + if self.is_empty() { + // Special case empty table to avoid surprising O(capacity) time. 
+ return; + } + // Ensure that the table is reset even if one of the drops panic + let mut self_ = guard(self, |self_| self_.clear_no_drop()); + unsafe { + self_.drop_elements(); + } + } + + unsafe fn drop_elements(&mut self) { + if Self::DATA_NEEDS_DROP && !self.is_empty() { + for item in self.iter() { + item.drop(); + } + } + } + + /// Shrinks the table to fit `max(self.len(), min_size)` elements. + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) { + into_ok(self.shrink_to_with(&mut (), min_size, infallible_hasher(hasher))); + } + + /// Shrinks the table to fit `max(self.len(), min_size)` elements. + /// + /// This variant supports a fallible hasher with a passed around context. + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to_with( + &mut self, + cx: &mut C, + min_size: usize, + hasher: impl Fn(&mut C, &T) -> Result, + ) -> Result<(), E> { + // Calculate the minimal number of elements that we need to reserve + // space for. + let min_size = usize::max(self.table.items, min_size); + if min_size == 0 { + *self = Self::new_in(self.table.alloc.clone()); + return Ok(()); + } + + // Calculate the number of buckets that we need for this number of + // elements. If the calculation overflows then the requested bucket + // count must be larger than what we have right and nothing needs to be + // done. + let min_buckets = match capacity_to_buckets(min_size) { + Some(buckets) => buckets, + None => return Ok(()), + }; + + // If we have more buckets than we need, shrink the table. + if min_buckets < self.buckets() { + // Fast path if the table is empty + if self.table.items == 0 { + *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); + } else { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + if self + .resize(cx, min_size, hasher, Fallibility::Infallible)? + .is_err() + { + unsafe { hint::unreachable_unchecked() } + } + } + } + + Ok(()) + } + + /// Ensures that at least `additional` items can be inserted into the table + /// without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + into_ok(self.reserve_with(&mut (), additional, infallible_hasher(hasher))) + } + + /// Ensures that at least `additional` items can be inserted into the table + /// without reallocation. + /// + /// This variant supports a fallible hasher with a passed around context. + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve_with( + &mut self, + cx: &mut C, + additional: usize, + hasher: impl Fn(&mut C, &T) -> Result, + ) -> Result<(), E> { + if unlikely(additional > self.table.growth_left) { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + if self + .reserve_rehash(cx, additional, hasher, Fallibility::Infallible)? + .is_err() + { + unsafe { hint::unreachable_unchecked() } + } + } + + Ok(()) + } + + /// Tries to ensure that at least `additional` items can be inserted into + /// the table without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + ) -> Result<(), TryReserveError> { + into_ok(self.try_reserve_with(&mut (), additional, infallible_hasher(hasher))) + } + + /// Tries to ensure that at least `additional` items can be inserted into + /// the table without reallocation. + /// + /// This variant supports a fallible hasher with a passed around context. 
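// A hedged usage sketch, not part of the patch: `reserve` funnels failures into
// `handle_alloc_error`/panic (Fallibility::Infallible) while `try_reserve` reports them
// as a `TryReserveError`; both take the hasher closure because growing the table
// rehashes every element. The `hashbrown::raw` paths stand in for this fork's module
// and are an assumption of the sketch; `insert` does not check for duplicates,
// matching the documentation above.
fn extend_from_slice<T: Clone>(
    table: &mut hashbrown::raw::RawTable<T>,
    items: &[T],
    hasher: impl Fn(&T) -> u64,
) -> Result<(), hashbrown::TryReserveError> {
    // One fallible reservation up front; the inserts below then never reallocate.
    table.try_reserve(items.len(), &hasher)?;
    for item in items {
        table.insert(hasher(item), item.clone(), &hasher);
    }
    Ok(())
}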
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve_with( + &mut self, + cx: &mut C, + additional: usize, + hasher: impl Fn(&mut C, &T) -> Result, + ) -> Result, E> { + if additional > self.table.growth_left { + self.reserve_rehash(cx, additional, hasher, Fallibility::Fallible) + } else { + Ok(Ok(())) + } + } + + /// Out-of-line slow path for `reserve` and `try_reserve`. + #[cold] + #[inline(never)] + fn reserve_rehash( + &mut self, + cx: &mut C, + additional: usize, + hasher: impl Fn(&mut C, &T) -> Result, + fallibility: Fallibility, + ) -> Result, E> { + unsafe { + self.table.reserve_rehash_inner( + cx, + additional, + &|table, cx, index| hasher(cx, table.bucket::(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + if Self::DATA_NEEDS_DROP { + Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) + } else { + None + }, + ) + } + } + + /// Allocates a new table of a different size and moves the contents of the + /// current table into it. + fn resize( + &mut self, + cx: &mut C, + capacity: usize, + hasher: impl Fn(&mut C, &T) -> Result, + fallibility: Fallibility, + ) -> Result, E> { + unsafe { + self.table.resize_inner( + cx, + capacity, + &|table, cx, index| hasher(cx, table.bucket::(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + ) + } + } + + /// Inserts a new element into the table, and returns its raw bucket. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { + unsafe { + let mut slot = self.table.find_insert_slot(hash); + + // We can avoid growing the table once we have reached our load + // factor if we are replacing a tombstone. This works since the + // number of EMPTY slots does not change in this case. + let old_ctrl = *self.table.ctrl(slot.index); + if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) { + self.reserve(1, hasher); + slot = self.table.find_insert_slot(hash); + } + + self.insert_in_slot(hash, slot, value) + } + } + + /// Attempts to insert a new element without growing the table and return its raw bucket. + /// + /// Returns an `Err` containing the given element if inserting it would require growing the + /// table. + /// + /// This does not check if the given element already exists in the table. + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result, T> { + unsafe { + match self.table.prepare_insert_no_grow(hash) { + Ok(index) => { + let bucket = self.bucket(index); + bucket.write(value); + Ok(bucket) + } + Err(()) => Err(value), + } + } + } + + /// Inserts a new element into the table, and returns a mutable reference to it. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T { + unsafe { self.insert(hash, value, hasher).as_mut() } + } + + /// Inserts a new element into the table, without growing the table. + /// + /// There must be enough space in the table to insert the new element. + /// + /// This does not check if the given element already exists in the table. 
+ #[cfg_attr(feature = "inline-more", inline)] + #[cfg(any(feature = "raw", feature = "rustc-internal-api"))] + pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket { + let (index, old_ctrl) = self.table.prepare_insert_slot(hash); + let bucket = self.table.bucket(index); + + // If we are replacing a DELETED entry then we don't need to update + // the load counter. + self.table.growth_left -= special_is_empty(old_ctrl) as usize; + + bucket.write(value); + self.table.items += 1; + bucket + } + + /// Temporary removes a bucket, applying the given function to the removed + /// element and optionally put back the returned value in the same bucket. + /// + /// Returns `true` if the bucket still contains an element + /// + /// This does not check if the given bucket is actually occupied. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool + where + F: FnOnce(T) -> Option, + { + let index = self.bucket_index(&bucket); + let old_ctrl = *self.table.ctrl(index); + debug_assert!(self.is_bucket_full(index)); + let old_growth_left = self.table.growth_left; + let item = self.remove(bucket).0; + if let Some(new_item) = f(item) { + self.table.growth_left = old_growth_left; + self.table.set_ctrl(index, old_ctrl); + self.table.items += 1; + self.bucket(index).write(new_item); + true + } else { + false + } + } + + /// Searches for an element in the table. If the element is not found, + /// returns `Err` with the position of a slot where an element with the + /// same hash could be inserted. + /// + /// This function may resize the table if additional space is required for + /// inserting an element. + #[inline] + pub fn find_or_find_insert_slot( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + hasher: impl Fn(&T) -> u64, + ) -> Result, InsertSlot> { + into_ok(self.find_or_find_insert_slot_with( + &mut (), + hash, + infallible_eq(eq), + infallible_hasher(hasher), + )) + } + + /// Searches for an element in the table. If the element is not found, + /// returns `Err` with the position of a slot where an element with the + /// same hash could be inserted. + /// + /// This function may resize the table if additional space is required for + /// inserting an element. + /// + /// This variant supports a fallible hasher with a passed around context. + #[inline] + pub fn find_or_find_insert_slot_with( + &mut self, + cx: &mut C, + hash: u64, + mut eq: impl FnMut(&mut C, &T) -> Result, + hasher: impl Fn(&mut C, &T) -> Result, + ) -> Result, InsertSlot>, E> { + self.reserve_with(cx, 1, hasher)?; + + match self + .table + .find_or_find_insert_slot_inner(cx, hash, &mut |cx, index| unsafe { + eq(cx, self.bucket(index).as_ref()) + })? { + Ok(index) => Ok(Ok(unsafe { self.bucket(index) })), + Err(slot) => Ok(Err(slot)), + } + } + + /// Inserts a new element into the table in the given slot, and returns its + /// raw bucket. + /// + /// # Safety + /// + /// `slot` must point to a slot previously returned by + /// `find_or_find_insert_slot`, and no mutation of the table must have + /// occurred since that call. + #[inline] + pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket { + let old_ctrl = *self.table.ctrl(slot.index); + self.table.record_item_insert_at(slot.index, old_ctrl, hash); + + let bucket = self.bucket(slot.index); + bucket.write(value); + bucket + } + + /// Searches for an element in the table. 
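// A hedged usage sketch, not part of the patch: the entry-style flow enabled by
// `find_or_find_insert_slot` plus `insert_in_slot` above, which probes only once for an
// "insert or update". The `hashbrown::raw` paths and the `make_hash` helper are
// assumptions of the sketch (they mirror the doc examples earlier in this file).
use core::hash::{BuildHasher, Hash, Hasher};

fn make_hash<S: BuildHasher, K: Hash + ?Sized>(state: &S, key: &K) -> u64 {
    let mut hasher = state.build_hasher();
    key.hash(&mut hasher);
    hasher.finish()
}

fn upsert<K: Hash + Eq, V, S: BuildHasher>(
    table: &mut hashbrown::raw::RawTable<(K, V)>,
    state: &S,
    key: K,
    value: V,
) {
    let hash = make_hash(state, &key);
    let eq = |entry: &(K, V)| entry.0 == key;
    match table.find_or_find_insert_slot(hash, eq, |entry| make_hash(state, &entry.0)) {
        // Key already present: overwrite the value in place.
        Ok(bucket) => unsafe { bucket.as_mut().1 = value },
        // Not present: the slot stays valid because the table was not touched in between.
        Err(slot) => unsafe {
            table.insert_in_slot(hash, slot, (key, value));
        },
    }
}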
+ #[inline] + pub fn find(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option> { + into_ok(self.find_with(&mut (), hash, infallible_eq(eq))) + } + + /// Searches for an element in the table. + /// + /// This variant supports a fallible hasher with a passed around context. + #[inline] + pub fn find_with( + &self, + cx: &mut C, + hash: u64, + mut eq: impl FnMut(&mut C, &T) -> Result, + ) -> Result>, E> { + let result = self.table.find_inner(cx, hash, &mut |cx, index| unsafe { + eq(cx, self.bucket(index).as_ref()) + })?; + + // Avoid `Option::map` because it bloats LLVM IR. + Ok(match result { + Some(index) => Some(unsafe { self.bucket(index) }), + None => None, + }) + } + + /// Gets a reference to an element in the table. + #[inline] + pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + into_ok(self.get_with(&mut (), hash, infallible_eq(eq))) + } + + /// Gets a reference to an element in the table. + /// + /// This variant supports a fallible hasher with a passed around context. + #[inline] + pub fn get_with( + &self, + cx: &mut C, + hash: u64, + eq: impl FnMut(&mut C, &T) -> Result, + ) -> Result, E> { + // Avoid `Option::map` because it bloats LLVM IR. + Ok(match self.find_with(cx, hash, eq)? { + Some(bucket) => Some(unsafe { bucket.as_ref() }), + None => None, + }) + } + + /// Gets a mutable reference to an element in the table. + #[inline] + pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + into_ok(self.get_mut_with(&mut (), hash, infallible_eq(eq))) + } + + /// Gets a mutable reference to an element in the table. + /// + /// This variant supports a fallible hasher with a passed around context. + #[inline] + pub fn get_mut_with( + &mut self, + cx: &mut C, + hash: u64, + eq: impl FnMut(&mut C, &T) -> Result, + ) -> Result, E> { + // Avoid `Option::map` because it bloats LLVM IR. + Ok(match self.find_with(cx, hash, eq)? { + Some(bucket) => Some(unsafe { bucket.as_mut() }), + None => None, + }) + } + + /// Attempts to get mutable references to `N` entries in the table at once. + /// + /// Returns an array of length `N` with the results of each query. + /// + /// At most one mutable reference will be returned to any entry. `None` will be returned if any + /// of the hashes are duplicates. `None` will be returned if the hash is not found. + /// + /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to + /// the `i`th key to be looked up. + pub fn get_many_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[&'_ mut T; N]> { + unsafe { + let ptrs = self.get_many_mut_pointers(hashes, eq)?; + + for (i, &cur) in ptrs.iter().enumerate() { + if ptrs[..i].iter().any(|&prev| ptr::eq::(prev, cur)) { + return None; + } + } + // All bucket are distinct from all previous buckets so we're clear to return the result + // of the lookup. + + // TODO use `MaybeUninit::array_assume_init` here instead once that's stable. + Some(mem::transmute_copy(&ptrs)) + } + } + + pub unsafe fn get_many_unchecked_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[&'_ mut T; N]> { + let ptrs = self.get_many_mut_pointers(hashes, eq)?; + Some(mem::transmute_copy(&ptrs)) + } + + unsafe fn get_many_mut_pointers( + &mut self, + hashes: [u64; N], + mut eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[*mut T; N]> { + // TODO use `MaybeUninit::uninit_array` here instead once that's stable. 
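// An illustrative sketch, not part of the patch: the aliasing check performed by
// get_many_mut above. Every hash is first resolved to a raw pointer; if any two of the
// resolved pointers are equal, two queries hit the same entry and the lookup returns
// None instead of handing out aliasing mutable references.
fn all_distinct<T>(ptrs: &[*mut T]) -> bool {
    ptrs.iter()
        .enumerate()
        .all(|(i, &cur)| ptrs[..i].iter().all(|&prev| !core::ptr::eq(prev, cur)))
}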
+ let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit(); + let outs_ptr = outs.as_mut_ptr(); + + for (i, &hash) in hashes.iter().enumerate() { + let cur = self.find(hash, |k| eq(i, k))?; + *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut(); + } + + // TODO use `MaybeUninit::array_assume_init` here instead once that's stable. + Some(outs.assume_init()) + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the table might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + #[inline] + pub fn capacity(&self) -> usize { + self.table.items + self.table.growth_left + } + + /// Returns the number of elements in the table. + #[inline] + pub fn len(&self) -> usize { + self.table.items + } + + /// Returns `true` if the table contains no elements. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of buckets in the table. + #[inline] + pub fn buckets(&self) -> usize { + self.table.bucket_mask + 1 + } + + /// Checks whether the bucket at `index` is full. + /// + /// # Safety + /// + /// The caller must ensure `index` is less than the number of buckets. + #[inline] + pub unsafe fn is_bucket_full(&self, index: usize) -> bool { + self.table.is_bucket_full(index) + } + + /// Returns an iterator over every element in the table. It is up to + /// the caller to ensure that the `RawTable` outlives the `RawIter`. + /// Because we cannot make the `next` method unsafe on the `RawIter` + /// struct, we have to make the `iter` method unsafe. + #[inline] + pub unsafe fn iter(&self) -> RawIter { + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), + items: self.table.items, + } + } + + /// Returns an iterator over occupied buckets that could match a given hash. + /// + /// `RawTable` only stores 7 bits of the hash value, so this iterator may + /// return items that have a hash value different than the one provided. You + /// should always validate the returned values before using them. + /// + /// It is up to the caller to ensure that the `RawTable` outlives the + /// `RawIterHash`. Because we cannot make the `next` method unsafe on the + /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "raw")] + pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash { + RawIterHash::new(self, hash) + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> RawDrain<'_, T, A> { + unsafe { + let iter = self.iter(); + self.drain_iter_from(iter) + } + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. 
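// Editorial sketch, not part of the imported fork: borrowing two entries
// mutably at once with `get_many_mut`. The lookup yields `None` if either
// key is missing or both lookups land on the same entry, which is what keeps
// the two returned references disjoint. The element type and names are
// illustrative.
fn swap_two_values(
    table: &mut RawTable<(u64, String)>,
    hashes: [u64; 2],
    keys: [u64; 2],
) -> bool {
    match table.get_many_mut(hashes, |i, (k, _)| *k == keys[i]) {
        Some([a, b]) => {
            core::mem::swap(&mut a.1, &mut b.1);
            true
        }
        None => false,
    }
}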
+ #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn drain_iter_from(&mut self, iter: RawIter) -> RawDrain<'_, T, A> { + debug_assert_eq!(iter.len(), self.len()); + RawDrain { + iter, + table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), + orig_table: NonNull::from(self), + marker: PhantomData, + } + } + + /// Returns an iterator which consumes all elements from the table. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { + debug_assert_eq!(iter.len(), self.len()); + + let allocation = self.into_allocation(); + RawIntoIter { + iter, + allocation, + marker: PhantomData, + } + } + + /// Converts the table into a raw allocation. The contents of the table + /// should be dropped using a `RawIter` before freeing the allocation. + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout, A)> { + let alloc = if self.table.is_empty_singleton() { + None + } else { + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = + match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; + Some(( + unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, + layout, + unsafe { ptr::read(&self.table.alloc) }, + )) + }; + mem::forget(self); + alloc + } +} + +unsafe impl Send for RawTable +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawTable +where + T: Sync, + A: Sync, +{ +} + +impl RawTableInner { + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never accessed + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + const fn new_in(alloc: A) -> Self { + Self { + // Be careful to cast the entire slice to a raw pointer. + ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, + bucket_mask: 0, + items: 0, + growth_left: 0, + alloc, + } + } +} + +impl RawTableInner { + /// Allocates a new [`RawTableInner`] with the given number of buckets. + /// The control bytes and buckets are left uninitialized. + /// + /// # Safety + /// + /// The caller of this function must ensure that the `buckets` is power of two + /// and also initialize all control bytes of the length `self.bucket_mask + 1 + + /// Group::WIDTH` with the [`EMPTY`] bytes. + /// + /// See also [`Allocator`] API for other safety concerns. + /// + /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + alloc: A, + table_layout: TableLayout, + buckets: usize, + fallibility: Fallibility, + ) -> Result { + debug_assert!(buckets.is_power_of_two()); + + // Avoid `Option::ok_or_else` because it bloats LLVM IR. 
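// Editorial sketch, not part of the imported fork: why the "empty singleton"
// described above reports one bucket but zero capacity, and how capacity
// tracks the roughly 7/8 load factor inherited from upstream hashbrown.
// Assumes the usual `new` / `with_capacity` constructors are carried over
// from upstream unchanged.
fn capacity_demo() {
    let empty: RawTable<u64> = RawTable::new();
    assert_eq!(empty.buckets(), 1);
    assert_eq!(empty.capacity(), 0);

    let sized: RawTable<u64> = RawTable::with_capacity(28);
    // 32 buckets hold at most 32 * 7 / 8 = 28 items before a resize.
    assert_eq!(sized.buckets(), 32);
    assert!(sized.capacity() >= 28);
}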
+ let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) { + Some(lco) => lco, + None => return Err(fallibility.capacity_overflow()), + }; + + let ptr: NonNull = match do_alloc(&alloc, layout) { + Ok(block) => block.cast(), + Err(_) => return Err(fallibility.alloc_err(layout)), + }; + + // SAFETY: null pointer will be caught in above check + let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); + Ok(Self { + ctrl, + bucket_mask: buckets - 1, + items: 0, + growth_left: bucket_mask_to_capacity(buckets - 1), + alloc, + }) + } + + /// Attempts to allocate a new [`RawTableInner`] with at least enough + /// capacity for inserting the given number of elements without reallocating. + /// + /// All the control bytes are initialized with the [`EMPTY`] bytes. + #[inline] + fn fallible_with_capacity( + alloc: A, + table_layout: TableLayout, + capacity: usize, + fallibility: Fallibility, + ) -> Result { + if capacity == 0 { + Ok(Self::new_in(alloc)) + } else { + // SAFETY: We checked that we could successfully allocate the new table, and then + // initialized all control bytes with the constant `EMPTY` byte. + unsafe { + let buckets = + capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?; + + let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?; + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. + result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes()); + + Ok(result) + } + } + } + + /// Fixes up an insertion slot due to false positives for groups smaller than the group width. + /// This must only be used on insertion slots found by `find_insert_slot_in_group`. + #[inline] + unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot { + // In tables smaller than the group width + // (self.buckets() < Group::WIDTH), trailing control + // bytes outside the range of the table are filled with + // EMPTY entries. These will unfortunately trigger a + // match, but once masked may point to a full bucket that + // is already occupied. We detect this situation here and + // perform a second scan starting at the beginning of the + // table. This second scan is guaranteed to find an empty + // slot (due to the load factor) before hitting the trailing + // control bytes (containing EMPTY). + if unlikely(self.is_bucket_full(index)) { + debug_assert!(self.bucket_mask < Group::WIDTH); + // SAFETY: + // + // * We are in range and `ptr = self.ctrl(0)` are valid for reads + // and properly aligned, because the table is already allocated + // (see `TableLayout::calculate_layout_for` and `ptr::read`); + // + // * For tables larger than the group width (self.buckets() >= Group::WIDTH), + // we will never end up in the given branch, since + // `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` cannot + // return a full bucket index. For tables smaller than the group width, calling the + // `unwrap_unchecked` function is also + // safe, as the trailing control bytes outside the range of the table are filled + // with EMPTY bytes, so this second scan either finds an empty slot (due to the + // load factor) or hits the trailing control bytes (containing EMPTY). 
+ index = Group::load_aligned(self.ctrl(0)) + .match_empty_or_deleted() + .lowest_set_bit() + .unwrap_unchecked(); + } + InsertSlot { index } + } + + /// Finds the position to insert something in a group. + /// This may have false positives and must be fixed up with `fix_insert_slot` before it's used. + #[inline] + fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option { + let bit = group.match_empty_or_deleted().lowest_set_bit(); + + if likely(bit.is_some()) { + Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask) + } else { + None + } + } + + /// Searches for an element in the table, or a potential slot where that + /// element could be inserted. + /// + /// This uses dynamic dispatch to reduce the amount of code generated for + /// the `eq` argument, but that is eliminated by LLVM optimizations. + #[inline] + fn find_or_find_insert_slot_inner( + &self, + cx: &mut C, + hash: u64, + eq: &mut dyn FnMut(&mut C, usize) -> Result, + ) -> Result, E> { + let mut insert_slot = None; + + let h2_hash = h2(hash); + let mut probe_seq = self.probe_seq(hash); + + loop { + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + + for bit in group.match_byte(h2_hash) { + let index = (probe_seq.pos + bit) & self.bucket_mask; + + if likely(eq(cx, index)?) { + return Ok(Ok(index)); + } + } + + // We didn't find the element we were looking for in the group, try to get an + // insertion slot from the group if we don't have one yet. + if likely(insert_slot.is_none()) { + insert_slot = self.find_insert_slot_in_group(&group, &probe_seq); + } + + // Only stop the search if the group contains at least one empty element. + // Otherwise, the element that we are looking for might be in a following group. + if likely(group.match_empty().any_bit_set()) { + // We must have found a insert slot by now, since the current group contains at + // least one. For tables smaller than the group width, there will still be an + // empty element in the current (and only) group due to the load factor. + unsafe { + return Ok(Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()))); + } + } + + probe_seq.move_next(self.bucket_mask); + } + } + + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element and sets the hash for that slot. + /// + /// There must be at least 1 empty bucket in the table. + #[inline] + unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) { + let index = self.find_insert_slot(hash).index; + let old_ctrl = *self.ctrl(index); + self.set_ctrl_h2(index, hash); + (index, old_ctrl) + } + + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element, returning the `index` for the new [`Bucket`]. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise this function + /// will never return (will go into an infinite loop) for tables larger than the group + /// width, or return an index outside of the table indices range if the table is less + /// than the group width. + /// + /// # Note + /// + /// Calling this function is always safe, but attempting to write data at + /// the index returned by this function when the table is less than the group width + /// and if there was not at least one empty bucket in the table will cause immediate + /// [`undefined behavior`]. 
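// Editorial sketch, not part of the imported fork: how the probe loop in
// `find_or_find_insert_slot_inner` above consumes the hash. The low bits
// (`h1`) pick the starting group, the top 7 bits (`h2`) form the FULL
// control-byte tag that `match_byte` compares across a whole group, and the
// probe advances in triangular steps of the group width, visiting every
// group exactly once in a power-of-two table. The 64-bit shift and the group
// width of 16 (SSE2) are assumptions; other backends use a width of 8.
fn probe_demo(hash: u64, bucket_mask: usize) {
    const GROUP_WIDTH: usize = 16;

    // h1: the starting probe position, masked to the table size.
    let mut pos = (hash as usize) & bucket_mask;
    // h2: a 7-bit tag with the high bit clear, i.e. a FULL control byte.
    let tag = ((hash >> 57) & 0x7f) as u8;

    let mut stride = 0;
    for _ in 0..4 {
        // A real probe loads the group at `pos`, checks `match_byte(tag)`
        // and `match_empty()`, then steps: 1, 3, 6, 10, ... group widths.
        let _ = (pos, tag);
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
}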
This is because in this case the function will return + /// `self.bucket_mask + 1` as an index due to the trailing EMPTY control bytes outside + /// the table range. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + fn find_insert_slot(&self, hash: u64) -> InsertSlot { + let mut probe_seq = self.probe_seq(hash); + loop { + // SAFETY: + // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` + // of the table due to masking with `self.bucket_mask` and also because mumber of + // buckets is a power of two (see comment for masking below). + // + // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to + // call `Group::load` due to the extended control bytes range, which is + // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control + // byte will never be read for the allocated table); + // + // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will + // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` + // bytes, which is safe (see RawTableInner::new_in). + unsafe { + let group = Group::load(self.ctrl(probe_seq.pos)); + let index = self.find_insert_slot_in_group(&group, &probe_seq); + + if likely(index.is_some()) { + return self.fix_insert_slot(index.unwrap_unchecked()); + } + } + probe_seq.move_next(self.bucket_mask); + } + } + + /// Searches for an element in a table, returning the `index` of the found element. + /// This uses dynamic dispatch to reduce the amount of code generated, but it is + /// eliminated by LLVM optimizations. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty `bucket`, otherwise, if the + /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, + /// this function will also never return (will go into an infinite loop). + #[inline(always)] + fn find_inner( + &self, + cx: &mut C, + hash: u64, + eq: &mut dyn FnMut(&mut C, usize) -> Result, + ) -> Result, E> { + let h2_hash = h2(hash); + let mut probe_seq = self.probe_seq(hash); + + loop { + // SAFETY: + // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` + // of the table due to masking with `self.bucket_mask`. + // + // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to + // call `Group::load` due to the extended control bytes range, which is + // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control + // byte will never be read for the allocated table); + // + // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will + // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` + // bytes, which is safe (see RawTableInner::new_in). + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + + for bit in group.match_byte(h2_hash) { + // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let index = (probe_seq.pos + bit) & self.bucket_mask; + + if likely(eq(cx, index)?) 
{ + return Ok(Some(index)); + } + } + + if likely(group.match_empty().any_bit_set()) { + return Ok(None); + } + + probe_seq.move_next(self.bucket_mask); + } + } + + /// Prepares for rehashing data in place (that is, without allocating new memory). + /// Converts all full index `control bytes` to `DELETED` and all `DELETED` control + /// bytes to `EMPTY`, i.e. performs the following conversion: + /// + /// - `EMPTY` control bytes -> `EMPTY`; + /// - `DELETED` control bytes -> `EMPTY`; + /// - `FULL` control bytes -> `DELETED`. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// You must observe the following safety rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The caller of this function must convert the `DELETED` bytes back to `FULL` + /// bytes when re-inserting them into their ideal position (which was impossible + /// to do during the first insert due to tombstones). If the caller does not do + /// this, then calling this function may result in a memory leak. + /// + /// Calling this function on a table that has not been allocated results in + /// [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::mut_mut)] + #[inline] + unsafe fn prepare_rehash_in_place(&mut self) { + // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY. + // This effectively frees up all buckets containing a DELETED entry. + // + // SAFETY: + // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`; + // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned` + // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`; + // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated; + // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0 + // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for). + for i in (0..self.buckets()).step_by(Group::WIDTH) { + let group = Group::load_aligned(self.ctrl(i)); + let group = group.convert_special_to_empty_and_full_to_deleted(); + group.store_aligned(self.ctrl(i)); + } + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. 
+ // + // SAFETY: The caller of this function guarantees that [`RawTableInner`] + // has already been allocated + if unlikely(self.buckets() < Group::WIDTH) { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe + self.ctrl(0) + .copy_to(self.ctrl(Group::WIDTH), self.buckets()); + } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe + self.ctrl(0) + .copy_to(self.ctrl(self.buckets()), Group::WIDTH); + } + } + + #[inline] + unsafe fn bucket(&self, index: usize) -> Bucket { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + #[inline] + unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + let base: *mut u8 = self.data_end().as_ptr(); + base.sub((index + 1) * size_of) + } + + #[inline] + unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.ctrl.as_ptr().cast()) + } + + /// Returns an iterator-like object for a probe sequence on the table. + /// + /// This iterator never terminates, but is guaranteed to visit each bucket + /// group exactly once. The loop using `probe_seq` must terminate upon + /// reaching a group containing an empty bucket. + #[inline] + fn probe_seq(&self, hash: u64) -> ProbeSeq { + ProbeSeq { + pos: h1(hash) & self.bucket_mask, + stride: 0, + } + } + + /// Returns the index of a bucket for which a value must be inserted if there is enough rooom + /// in the table, otherwise returns error + #[cfg(feature = "raw")] + #[inline] + unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result { + let index = self.find_insert_slot(hash).index; + let old_ctrl = *self.ctrl(index); + if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) { + Err(()) + } else { + self.record_item_insert_at(index, old_ctrl, hash); + Ok(index) + } + } + + #[inline] + unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) { + self.growth_left -= usize::from(special_is_empty(old_ctrl)); + self.set_ctrl_h2(index, hash); + self.items += 1; + } + + #[inline] + fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool { + let probe_seq_pos = self.probe_seq(hash).pos; + let probe_index = + |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH; + probe_index(i) == probe_index(new_i) + } + + /// Sets a control byte to the hash, and possibly also the replicated control byte at + /// the end of the array. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`] + /// method. Thus, in order to uphold the safety contracts for the method, you must observe the + /// following rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. 
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) { + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`] + self.set_ctrl(index, h2(hash)); + } + + /// Replaces the hash in the control byte at the given index with the provided one, + /// and possibly also replicates the new control byte at the end of the array of control + /// bytes, returning the old control byte. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`] + /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both + /// methods, you must observe the following rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2 + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 { + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`] + let prev_ctrl = *self.ctrl(index); + self.set_ctrl_h2(index, hash); + prev_ctrl + } + + /// Sets a control byte, and possibly also the replicated control byte at + /// the end of the array. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// You must observe the following safety rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. 
+ /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn set_ctrl(&self, index: usize, ctrl: u8) { + // Replicate the first Group::WIDTH control bytes at the end of + // the array without using a branch: + // - If index >= Group::WIDTH then index == index2. + // - Otherwise index2 == self.bucket_mask + 1 + index. + // + // The very last replicated control byte is never actually read because + // we mask the initial index for unaligned loads, but we write it + // anyways because it makes the set_ctrl implementation simpler. + // + // If there are fewer buckets than Group::WIDTH then this code will + // replicate the buckets at the end of the trailing group. For example + // with 2 buckets and a group size of 4, the control bytes will look + // like this: + // + // Real | Replicated + // --------------------------------------------- + // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] | + // --------------------------------------------- + + // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH` + // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; + + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`] + *self.ctrl(index) = ctrl; + *self.ctrl(index2) = ctrl; + } + + /// Returns a pointer to a control byte. + /// + /// # Safety + /// + /// For the allocated [`RawTableInner`], the result is [`Undefined Behavior`], + /// if the `index` is greater than the `self.bucket_mask + 1 + Group::WIDTH`. + /// In that case, calling this function with `index == self.bucket_mask + 1 + Group::WIDTH` + /// will return a pointer to the end of the allocated table and it is useless on its own. + /// + /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a + /// table that has not been allocated results in [`Undefined Behavior`]. + /// + /// So to satisfy both requirements you should always follow the rule that + /// `index < self.bucket_mask + 1 + Group::WIDTH` + /// + /// Calling this function on [`RawTableInner`] that are not already allocated is safe + /// for read-only purpose. + /// + /// See also [`Bucket::as_ptr()`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`Bucket::as_ptr()`]: Bucket::as_ptr() + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn ctrl(&self, index: usize) -> *mut u8 { + debug_assert!(index < self.num_ctrl_bytes()); + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] + self.ctrl.as_ptr().add(index) + } + + #[inline] + fn buckets(&self) -> usize { + self.bucket_mask + 1 + } + + /// Checks whether the bucket at `index` is full. + /// + /// # Safety + /// + /// The caller must ensure `index` is less than the number of buckets. 
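// Editorial sketch, not part of the imported fork: the mirror index computed
// by `set_ctrl` above, checked against the Real | Replicated diagram
// (2 buckets, group width 4). The helper and hard-coded widths are
// illustrative.
fn mirror_index(index: usize, bucket_mask: usize, group_width: usize) -> usize {
    (index.wrapping_sub(group_width) & bucket_mask) + group_width
}

fn mirror_demo() {
    // Writing bucket 0 also writes replicated byte 4, and bucket 1 writes
    // byte 5, exactly as drawn in the diagram above.
    assert_eq!(mirror_index(0, 1, 4), 4);
    assert_eq!(mirror_index(1, 1, 4), 5);
    // In a table at least one group wide the "mirror" is the index itself,
    // so the second store is a harmless duplicate of the first.
    assert_eq!(mirror_index(5, 7, 4), 5);
}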
+ #[inline] + unsafe fn is_bucket_full(&self, index: usize) -> bool { + debug_assert!(index < self.buckets()); + is_full(*self.ctrl(index)) + } + + #[inline] + fn num_ctrl_bytes(&self) -> usize { + self.bucket_mask + 1 + Group::WIDTH + } + + #[inline] + fn is_empty_singleton(&self) -> bool { + self.bucket_mask == 0 + } + + #[allow(clippy::mut_mut)] + #[inline] + unsafe fn prepare_resize( + &self, + table_layout: TableLayout, + capacity: usize, + fallibility: Fallibility, + ) -> Result< + crate::hashbrown::fork::scopeguard::ScopeGuard, + TryReserveError, + > { + debug_assert!(self.items <= capacity); + + // Allocate and initialize the new table. + let mut new_table = RawTableInner::fallible_with_capacity( + self.alloc.clone(), + table_layout, + capacity, + fallibility, + )?; + new_table.growth_left -= self.items; + new_table.items = self.items; + + // The hash function may panic, in which case we simply free the new + // table without dropping any elements that may have been copied into + // it. + // + // This guard is also used to free the old table on success, see + // the comment at the bottom of this function. + Ok(guard(new_table, move |self_| { + if !self_.is_empty_singleton() { + self_.free_buckets(table_layout); + } + })) + } + + /// Reserves or rehashes to make room for `additional` more elements. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. + #[allow(clippy::inline_always)] + #[inline(always)] + unsafe fn reserve_rehash_inner( + &mut self, + cx: &mut C, + additional: usize, + hasher: &dyn Fn(&mut Self, &mut C, usize) -> Result, + fallibility: Fallibility, + layout: TableLayout, + drop: Option, + ) -> Result, E> { + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let new_items = match self.items.checked_add(additional) { + Some(new_items) => new_items, + None => return Ok(Err(fallibility.capacity_overflow())), + }; + let full_capacity = bucket_mask_to_capacity(self.bucket_mask); + if new_items <= full_capacity / 2 { + // Rehash in-place without re-allocating if we have plenty of spare + // capacity that is locked up due to DELETED entries. + self.rehash_in_place(cx, hasher, layout.size, drop)?; + Ok(Ok(())) + } else { + // Otherwise, conservatively resize to at least the next size up + // to avoid churning deletes into frequent rehashes. + self.resize_inner( + cx, + usize::max(new_items, full_capacity + 1), + hasher, + fallibility, + layout, + ) + } + } + + /// Allocates a new table of a different size and moves the contents of the + /// current table into it. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. + #[allow(clippy::inline_always)] + #[inline(always)] + unsafe fn resize_inner( + &mut self, + cx: &mut C, + capacity: usize, + hasher: &dyn Fn(&mut Self, &mut C, usize) -> Result, + fallibility: Fallibility, + layout: TableLayout, + ) -> Result, E> { + let mut new_table = match self.prepare_resize(layout, capacity, fallibility) { + Ok(new_table) => new_table, + Err(error) => return Ok(Err(error)), + }; + + // Copy all elements to the new table. + for i in 0..self.buckets() { + if !self.is_bucket_full(i) { + continue; + } + + // This may panic. + let hash = hasher(self, cx, i)?; + + // We can use a simpler version of insert() here since: + // - there are no DELETED entries. + // - we know there is enough space in the table. + // - all elements are unique. 
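// Editorial sketch, not part of the imported fork: the growth policy that
// `reserve_rehash_inner` above implements, with `full_capacity` standing in
// for `bucket_mask_to_capacity(self.bucket_mask)`.
enum GrowthPlan {
    // Enough of the nominal capacity is tied up in DELETED entries: reclaim
    // it by rehashing in place instead of allocating.
    RehashInPlace,
    // Otherwise grow to at least the next size up.
    Resize { new_capacity: usize },
}

fn plan_growth(items: usize, additional: usize, full_capacity: usize) -> Option<GrowthPlan> {
    let new_items = items.checked_add(additional)?;
    Some(if new_items <= full_capacity / 2 {
        GrowthPlan::RehashInPlace
    } else {
        GrowthPlan::Resize {
            new_capacity: usize::max(new_items, full_capacity + 1),
        }
    })
}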
+ let (index, _) = new_table.prepare_insert_slot(hash); + + ptr::copy_nonoverlapping( + self.bucket_ptr(i, layout.size), + new_table.bucket_ptr(index, layout.size), + layout.size, + ); + } + + // We successfully copied all elements without panicking. Now replace + // self with the new table. The old table will have its memory freed but + // the items will not be dropped (since they have been moved into the + // new table). + mem::swap(self, &mut new_table); + + Ok(Ok(())) + } + + /// Rehashes the contents of the table in place (i.e. without changing the + /// allocation). + /// + /// If `hasher` panics then some the table's contents may be lost. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. + #[allow(clippy::inline_always)] + #[cfg_attr(feature = "inline-more", inline(always))] + #[cfg_attr(not(feature = "inline-more"), inline)] + unsafe fn rehash_in_place( + &mut self, + cx: &mut C, + hasher: &dyn Fn(&mut Self, &mut C, usize) -> Result, + size_of: usize, + drop: Option, + ) -> Result<(), E> { + // If the hash function panics then properly clean up any elements + // that we haven't rehashed yet. We unfortunately can't preserve the + // element since we lost their hash and have no way of recovering it + // without risking another panic. + self.prepare_rehash_in_place(); + + let mut guard = guard(self, move |self_| { + if let Some(drop) = drop { + for i in 0..self_.buckets() { + if *self_.ctrl(i) == DELETED { + self_.set_ctrl(i, EMPTY); + drop(self_.bucket_ptr(i, size_of)); + self_.items -= 1; + } + } + } + self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items; + }); + + // At this point, DELETED elements are elements that we haven't + // rehashed yet. Find them and re-insert them at their ideal + // position. + 'outer: for i in 0..guard.buckets() { + if *guard.ctrl(i) != DELETED { + continue; + } + + let i_p = guard.bucket_ptr(i, size_of); + + 'inner: loop { + // Hash the current item + let hash = hasher(*guard, cx, i)?; + + // Search for a suitable place to put it + let new_i = guard.find_insert_slot(hash).index; + + // Probing works by scanning through all of the control + // bytes in groups, which may not be aligned to the group + // size. If both the new and old position fall within the + // same unaligned group, then there is no benefit in moving + // it and we can just continue to the next item. + if likely(guard.is_in_same_group(i, new_i, hash)) { + guard.set_ctrl_h2(i, hash); + continue 'outer; + } + + let new_i_p = guard.bucket_ptr(new_i, size_of); + + // We are moving the current item to a new position. Write + // our H2 to the control byte of the new position. + let prev_ctrl = guard.replace_ctrl_h2(new_i, hash); + if prev_ctrl == EMPTY { + guard.set_ctrl(i, EMPTY); + // If the target slot is empty, simply move the current + // element into the new slot and clear the old control + // byte. + ptr::copy_nonoverlapping(i_p, new_i_p, size_of); + continue 'outer; + } else { + // If the target slot is occupied, swap the two elements + // and then continue processing the element that we just + // swapped into the old slot. 
+ debug_assert_eq!(prev_ctrl, DELETED); + ptr::swap_nonoverlapping(i_p, new_i_p, size_of); + continue 'inner; + } + } + } + + guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items; + + mem::forget(guard); + Ok(()) + } + + #[inline] + unsafe fn free_buckets(&mut self, table_layout: TableLayout) { + let (ptr, layout) = self.allocation_info(table_layout); + self.alloc.deallocate(ptr, layout); + } + + #[inline] + fn allocation_info(&self, table_layout: TableLayout) -> (NonNull, Layout) { + debug_assert!( + !self.is_empty_singleton(), + "this function can only be called on non-empty tables" + ); + + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; + ( + unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) }, + layout, + ) + } + + #[cfg(feature = "raw")] + fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull, Layout) { + if self.is_empty_singleton() { + (NonNull::dangling(), Layout::new::<()>()) + } else { + self.allocation_info(table_layout) + } + } + + /// Marks all table buckets as empty without dropping their contents. + #[inline] + fn clear_no_drop(&mut self) { + if !self.is_empty_singleton() { + unsafe { + self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes()); + } + } + self.items = 0; + self.growth_left = bucket_mask_to_capacity(self.bucket_mask); + } + + /// Erases the [`Bucket`]'s control byte at the given index so that it does not + /// triggered as full, decreases the `items` of the table and, if it can be done, + /// increases `self.growth_left`. + /// + /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it + /// does not make any changes to the `data` parts of the table. The caller of this + /// function must take care to properly drop the `data`, otherwise calling this + /// function may result in a memory leak. + /// + /// # Safety + /// + /// You must observe the following safety rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * It must be the full control byte at the given position; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// Calling this function on a table with no elements is unspecified, but calling subsequent + /// functions is likely to result in [`undefined behavior`] due to overflow subtraction + /// (`self.items -= 1 cause overflow when self.items == 0`). + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn erase(&mut self, index: usize) { + debug_assert!(self.is_bucket_full(index)); + + // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because + // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. 
+ let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; + // SAFETY: + // - The caller must uphold the safety contract for `erase` method; + // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask` + let empty_before = Group::load(self.ctrl(index_before)).match_empty(); + let empty_after = Group::load(self.ctrl(index)).match_empty(); + + // Inserting and searching in the map is performed by two key functions: + // + // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED` + // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED` + // slot immediately in the first group, it jumps to the next `Group` looking for it, + // and so on until it has gone through all the groups in the control bytes. + // + // - The `find_inner` function that looks for the index of the desired element by looking + // at all the `FULL` bytes in the group. If it did not find the element right away, and + // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot` + // function may have found a suitable slot in the next group. Therefore, `find_inner` + // jumps further, and if it does not find the desired element and again there is no `EMPTY` + // byte, then it jumps further, and so on. The search stops only if `find_inner` function + // finds the desired element or hits an `EMPTY` slot/byte. + // + // Accordingly, this leads to two consequences: + // + // - The map must have `EMPTY` slots (bytes); + // + // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner` + // function may stumble upon an `EMPTY` byte before finding the desired element and stop + // searching. + // + // Thus it is necessary to check all bytes after and before the erased element. If we are in + // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes + // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as + // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there + // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble + // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well. + // + // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index` + // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH) + // cannot have `DELETED` bytes. + // + // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while + // `trailing_zeros` refers to the bytes at the beginning of a group. + let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { + DELETED + } else { + self.growth_left += 1; + EMPTY + }; + // SAFETY: the caller must uphold the safety contract for `erase` method. + self.set_ctrl(index, ctrl); + self.items -= 1; + } +} + +impl Clone for RawTable { + fn clone(&self) -> Self { + if self.table.is_empty_singleton() { + Self::new_in(self.table.alloc.clone()) + } else { + unsafe { + // Avoid `Result::ok_or_else` because it bloats LLVM IR. + // + // SAFETY: This is safe as we are taking the size of an already allocated table + // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power + // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`. 
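// Editorial sketch, not part of the imported fork: a scalar restatement of
// the DELETED-vs-EMPTY decision in `erase` above. The erased slot only needs
// a tombstone when the run of FULL/DELETED bytes through it spans at least a
// full group, because only then could a probe for some other key have moved
// past this group without seeing an EMPTY byte. This simplification wraps
// plain indices and ignores the replicated trailing control bytes.
fn needs_tombstone(ctrl: &[u8], index: usize, group_width: usize) -> bool {
    const EMPTY: u8 = 0xff;
    let len = ctrl.len();
    let is_empty = |i: usize| ctrl[i % len] == EMPTY;

    // Contiguous non-EMPTY bytes immediately before the erased slot...
    let mut before = 0;
    while before < group_width && !is_empty(index + len - 1 - before) {
        before += 1;
    }
    // ...and immediately after it (the slot itself accounts for the `+ 1`).
    let mut after = 0;
    while after < group_width && !is_empty(index + 1 + after) {
        after += 1;
    }

    before + 1 + after >= group_width
}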
+ let mut new_table = match Self::new_uninitialized( + self.table.alloc.clone(), + self.table.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + + // Cloning elements may fail (the clone function may panic). But we don't + // need to worry about uninitialized control bits, since: + // 1. The number of items (elements) in the table is zero, which means that + // the control bits will not be readed by Drop function. + // 2. The `clone_from_spec` method will first copy all control bits from + // `self` (thus initializing them). But this will not affect the `Drop` + // function, since the `clone_from_spec` function sets `items` only after + // successfully clonning all elements. + new_table.clone_from_spec(self); + new_table + } + } + } + + fn clone_from(&mut self, source: &Self) { + if source.table.is_empty_singleton() { + // Dereference drops old `self` table + *self = Self::new_in(self.table.alloc.clone()); + } else { + unsafe { + // Make sure that if any panics occurs, we clear the table and + // leave it in an empty state. + let mut guard = guard(&mut *self, |self_| { + self_.clear_no_drop(); + }); + + // First, drop all our elements without clearing the control + // bytes. If this panics then the scope guard will clear the + // table, leaking any elements that were not dropped yet. + // + // This leak is unavoidable: we can't try dropping more elements + // since this could lead to another panic and abort the process. + // + // SAFETY: We clear our table right after dropping the elements, + // so there is no double drop, since `items` will be equal to zero. + guard.drop_elements(); + + // Okay, we've successfully dropped all elements, so we'll just set + // `items` to zero (so that the `Drop` of `RawTable` doesn't try to + // drop all elements twice) and just forget about the guard. + guard.table.items = 0; + mem::forget(guard); + + // If necessary, resize our table to match the source. + if self.buckets() != source.buckets() { + // Skip our drop by using ptr::write. + if !self.table.is_empty_singleton() { + // SAFETY: We have verified that the table is allocated. + self.free_buckets(); + } + (self as *mut Self).write( + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + // + // SAFETY: This is safe as we are taking the size of an already allocated table + // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power + // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`. + match Self::new_uninitialized( + self.table.alloc.clone(), + source.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }, + ); + } + + // Cloning elements may fail (the clone function may panic), but the `ScopeGuard` + // inside the `clone_from_impl` function will take care of that, dropping all + // cloned elements if necessary. The `Drop` of `RawTable` takes care of the rest + // by freeing up the allocated memory. + self.clone_from_spec(source); + } + } + } +} + +/// Specialization of `clone_from` for `Copy` types +trait RawTableClone { + unsafe fn clone_from_spec(&mut self, source: &Self); +} +impl RawTableClone for RawTable { + default_fn! 
{ + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self) { + self.clone_from_impl(source); + } + } +} +#[cfg(feature = "nightly")] +impl RawTableClone for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self) { + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + source + .data_start() + .as_ptr() + .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets()); + + self.table.items = source.table.items; + self.table.growth_left = source.table.growth_left; + } +} + +impl RawTable { + /// Common code for clone and clone_from. Assumes: + /// - `self.buckets() == source.buckets()`. + /// - Any existing elements have been dropped. + /// - The control bytes are not initialized yet. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_impl(&mut self, source: &Self) { + // Copy the control bytes unchanged. We do this in a single pass + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + + // The cloning of elements may panic, in which case we need + // to make sure we drop only the elements that have been + // cloned so far. + let mut guard = guard((0, &mut *self), |(index, self_)| { + if Self::DATA_NEEDS_DROP { + for i in 0..=*index { + if self_.is_bucket_full(i) { + self_.bucket(i).drop(); + } + } + } + }); + + for from in source.iter() { + let index = source.bucket_index(&from); + let to = guard.1.bucket(index); + to.write(from.as_ref().clone()); + + // Update the index in case we need to unwind. + guard.0 = index; + } + + // Successfully cloned all items, no need to clean up. + mem::forget(guard); + + self.table.items = source.table.items; + self.table.growth_left = source.table.growth_left; + } + + /// Variant of `clone_from` to use when a hasher is available. + #[cfg(feature = "raw")] + pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { + // If we have enough capacity in the table, just clear it and insert + // elements one by one. We don't do this if we have the same number of + // buckets as the source since we can just copy the contents directly + // in that case. + if self.table.buckets() != source.table.buckets() + && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len() + { + self.clear(); + + let guard_self = guard(&mut *self, |self_| { + // Clear the partially copied table if a panic occurs, otherwise + // items and growth_left will be out of sync with the contents + // of the table. + self_.clear(); + }); + + unsafe { + for item in source.iter() { + // This may panic. + let item = item.as_ref().clone(); + let hash = hasher(&item); + + // We can use a simpler version of insert() here since: + // - there are no DELETED entries. + // - we know there is enough space in the table. + // - all elements are unique. + let (index, _) = guard_self.table.prepare_insert_slot(hash); + guard_self.bucket(index).write(item); + } + } + + // Successfully cloned all items, no need to clean up. 
+ mem::forget(guard_self); + + self.table.items = source.table.items; + self.table.growth_left -= source.table.items; + } else { + self.clone_from(source); + } + } +} + +impl Default for RawTable { + #[inline] + fn default() -> Self { + Self::new_in(Default::default()) + } +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + if !self.table.is_empty_singleton() { + unsafe { + self.drop_elements(); + self.free_buckets(); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + if !self.table.is_empty_singleton() { + unsafe { + self.drop_elements(); + self.free_buckets(); + } + } + } +} + +impl IntoIterator for RawTable { + type Item = T; + type IntoIter = RawIntoIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> RawIntoIter { + unsafe { + let iter = self.iter(); + self.into_iter_from(iter) + } + } +} + +/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does +/// not track an item count. +pub(crate) struct RawIterRange { + // Mask of full buckets in the current group. Bits are cleared from this + // mask as each element is processed. + current_group: BitMaskIter, + + // Pointer to the buckets for the current group. + data: Bucket, + + // Pointer to the next group of control bytes, + // Must be aligned to the group size. + next_ctrl: *const u8, + + // Pointer one past the last control byte of this range. + end: *const u8, +} + +impl RawIterRange { + /// Returns a `RawIterRange` covering a subset of a table. + /// + /// The control byte address must be aligned to the group size. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { + debug_assert_ne!(len, 0); + debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); + let end = ctrl.add(len); + + // Load the first group and advance ctrl to point to the next group + let current_group = Group::load_aligned(ctrl).match_full(); + let next_ctrl = ctrl.add(Group::WIDTH); + + Self { + current_group: current_group.into_iter(), + data, + next_ctrl, + end, + } + } + + /// Splits a `RawIterRange` into two halves. + /// + /// Returns `None` if the remaining range is smaller than or equal to the + /// group width. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "rayon")] + pub(crate) fn split(mut self) -> (Self, Option>) { + unsafe { + if self.end <= self.next_ctrl { + // Nothing to split if the group that we are current processing + // is the last one. + (self, None) + } else { + // len is the remaining number of elements after the group that + // we are currently processing. It must be a multiple of the + // group size (small tables are caught by the check above). + let len = offset_from(self.end, self.next_ctrl); + debug_assert_eq!(len % Group::WIDTH, 0); + + // Split the remaining elements into two halves, but round the + // midpoint down in case there is an odd number of groups + // remaining. This ensures that: + // - The tail is at least 1 group long. + // - The split is roughly even considering we still have the + // current group to process. 
+ let mid = (len / 2) & !(Group::WIDTH - 1); + + let tail = Self::new( + self.next_ctrl.add(mid), + self.data.next_n(Group::WIDTH).next_n(mid), + len - mid, + ); + debug_assert_eq!( + self.data.next_n(Group::WIDTH).next_n(mid).ptr, + tail.data.ptr + ); + debug_assert_eq!(self.end, tail.end); + self.end = self.next_ctrl.add(mid); + debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl); + (self, Some(tail)) + } + } + } + + /// # Safety + /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate + /// after yielding all elements. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn next_impl(&mut self) -> Option> { + loop { + if let Some(index) = self.current_group.next() { + return Some(self.data.next_n(index)); + } + + if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end { + return None; + } + + // We might read past self.end up to the next group boundary, + // but this is fine because it only occurs on tables smaller + // than the group size where the trailing control bytes are all + // EMPTY. On larger tables self.end is guaranteed to be aligned + // to the group size (since tables are power-of-two sized). + self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } + } +} + +// We make raw iterators unconditionally Send and Sync, and let the PhantomData +// in the actual iterator implementations determine the real Send/Sync bounds. +unsafe impl Send for RawIterRange {} +unsafe impl Sync for RawIterRange {} + +impl Clone for RawIterRange { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + next_ctrl: self.next_ctrl, + current_group: self.current_group, + end: self.end, + } + } +} + +impl Iterator for RawIterRange { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + unsafe { + // SAFETY: We set checker flag to true. + self.next_impl::() + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // We don't have an item count, so just guess based on the range size. + let remaining_buckets = if self.end > self.next_ctrl { + unsafe { offset_from(self.end, self.next_ctrl) } + } else { + 0 + }; + + // Add a group width to include the group we are currently processing. + (0, Some(Group::WIDTH + remaining_buckets)) + } +} + +impl FusedIterator for RawIterRange {} + +/// Iterator which returns a raw pointer to every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket (unless `reflect_remove` is called). +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator (unless `reflect_insert` is called). +/// - The order in which the iterator yields bucket is unspecified and may +/// change in the future. +pub struct RawIter { + pub(crate) iter: RawIterRange, + items: usize, +} + +impl RawIter { + const DATA_NEEDS_DROP: bool = mem::needs_drop::(); + + /// Refresh the iterator so that it reflects a removal from the given bucket. 
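// Editorial sketch, not part of the imported fork: using the raw iterator.
// `RawTable::iter` is unsafe only because the iterator does not borrow the
// table, so the caller must keep the table alive and unmodified while it is
// in use. The `(u64, i64)` element type is illustrative.
fn sum_values(table: &RawTable<(u64, i64)>) -> i64 {
    let mut total = 0;
    unsafe {
        // Sound here: `table` outlives the iterator and is not grown,
        // shrunk, or freed while we walk it.
        for bucket in table.iter() {
            total += bucket.as_ref().1;
        }
    }
    total
}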
+ /// + /// For the iterator to remain valid, this method must be called once + /// for each removed bucket before `next` is called again. + /// + /// This method should be called _before_ the removal is made. It is not necessary to call this + /// method if you are removing an item that this iterator yielded in the past. + #[cfg(feature = "raw")] + pub unsafe fn reflect_remove(&mut self, b: &Bucket) { + self.reflect_toggle_full(b, false); + } + + /// Refresh the iterator so that it reflects an insertion into the given bucket. + /// + /// For the iterator to remain valid, this method must be called once + /// for each insert before `next` is called again. + /// + /// This method does not guarantee that an insertion of a bucket with a greater + /// index than the last one yielded will be reflected in the iterator. + /// + /// This method should be called _after_ the given insert is made. + #[cfg(feature = "raw")] + pub unsafe fn reflect_insert(&mut self, b: &Bucket) { + self.reflect_toggle_full(b, true); + } + + /// Refresh the iterator so that it reflects a change to the state of the given bucket. + #[cfg(feature = "raw")] + unsafe fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { + if b.as_ptr() > self.iter.data.as_ptr() { + // The iterator has already passed the bucket's group. + // So the toggle isn't relevant to this iterator. + return; + } + + if self.iter.next_ctrl < self.iter.end + && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() + { + // The iterator has not yet reached the bucket's group. + // We don't need to reload anything, but we do need to adjust the item count. + + if cfg!(debug_assertions) { + // Double-check that the user isn't lying to us by checking the bucket state. + // To do that, we need to find its control byte. We know that self.iter.data is + // at self.iter.next_ctrl - Group::WIDTH, so we work from there: + let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); + // This method should be called _before_ a removal, or _after_ an insert, + // so in both cases the ctrl byte should indicate that the bucket is full. + assert!(is_full(*ctrl)); + } + + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } + + return; + } + + // The iterator is at the bucket group that the toggled bucket is in. + // We need to do two things: + // + // - Determine if the iterator already yielded the toggled bucket. + // If it did, we're done. + // - Otherwise, update the iterator cached group so that it won't + // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. + // We'll also need to update the item count accordingly. + if let Some(index) = self.iter.current_group.0.lowest_set_bit() { + let next_bucket = self.iter.data.next_n(index); + if b.as_ptr() > next_bucket.as_ptr() { + // The toggled bucket is "before" the bucket the iterator would yield next. We + // therefore don't need to do anything --- the iterator has already passed the + // bucket in question. + // + // The item count must already be correct, since a removal or insert "prior" to + // the iterator's position wouldn't affect the item count. + } else { + // The removed bucket is an upcoming bucket. We need to make sure it does _not_ + // get yielded, and also that it's no longer included in the item count. 
+ // + // NOTE: We can't just reload the group here, both since that might reflect + // inserts we've already passed, and because that might inadvertently unset the + // bits for _other_ removals. If we do that, we'd have to also decrement the + // item count for those other bits that we unset. But the presumably subsequent + // call to reflect for those buckets might _also_ decrement the item count. + // Instead, we _just_ flip the bit for the particular bucket the caller asked + // us to reflect. + let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let was_full = self.iter.current_group.flip(our_bit); + debug_assert_ne!(was_full, is_insert); + + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } + + if cfg!(debug_assertions) { + if b.as_ptr() == next_bucket.as_ptr() { + // The removed bucket should no longer be next + debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index)); + } else { + // We should not have changed what bucket comes next. + debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index)); + } + } + } + } else { + // We must have already iterated past the removed item. + } + } + + unsafe fn drop_elements(&mut self) { + if Self::DATA_NEEDS_DROP && self.len() != 0 { + for item in self { + item.drop(); + } + } + } +} + +impl Clone for RawIter { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + iter: self.iter.clone(), + items: self.items, + } + } +} + +impl Iterator for RawIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + // Inner iterator iterates over buckets + // so it can do unnecessary work if we already yielded all items. + if self.items == 0 { + return None; + } + + let nxt = unsafe { + // SAFETY: We check number of items to yield using `items` field. + self.iter.next_impl::() + }; + + debug_assert!(nxt.is_some()); + self.items -= 1; + + nxt + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (self.items, Some(self.items)) + } +} + +impl ExactSizeIterator for RawIter {} +impl FusedIterator for RawIter {} + +/// Iterator which consumes a table and returns elements. 
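An aside on the `RawIter` contract spelled out above: erasing a bucket the iterator has already yielded is always fine, while removals or inserts the iterator has not yet seen need `reflect_remove`/`reflect_insert` to keep the cached group and the `items` count honest. A minimal sketch of the allowed erase-while-iterating pattern, assuming the fork keeps hashbrown's `iter`, `erase` and `as_ref` signatures (the `insert` shape is confirmed by the tests further down in this file):

```rust
use crate::hashbrown::fork::raw::RawTable;

/// Illustration only, not part of the patch.
fn drop_even(table: &mut RawTable<u64>) {
    unsafe {
        // `iter()` is not lifetime-bound, so the table must stay allocated
        // and must not grow or shrink while the iterator is alive.
        for bucket in table.iter() {
            if *bucket.as_ref() % 2 == 0 {
                // Erasing an already-yielded bucket is explicitly allowed.
                table.erase(bucket);
            }
        }
    }
}
```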
+pub struct RawIntoIter { + iter: RawIter, + allocation: Option<(NonNull, Layout, A)>, + marker: PhantomData, +} + +impl RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawIntoIter +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawIntoIter +where + T: Sync, + A: Sync, +{ +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + self.iter.drop_elements(); + + // Free the table + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + self.iter.drop_elements(); + + // Free the table + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); + } + } + } +} + +impl Iterator for RawIntoIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { Some(self.iter.next()?.read()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawIntoIter {} +impl FusedIterator for RawIntoIter {} + +/// Iterator which consumes elements without freeing the table storage. +pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { + iter: RawIter, + + // The table is moved into the iterator for the duration of the drain. This + // ensures that an empty table is left if the drain iterator is leaked + // without dropping. + table: ManuallyDrop>, + orig_table: NonNull>, + + // We don't use a &'a mut RawTable because we want RawDrain to be + // covariant over T. + marker: PhantomData<&'a RawTable>, +} + +impl RawDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawDrain<'_, T, A> +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawDrain<'_, T, A> +where + T: Sync, + A: Sync, +{ +} + +impl Drop for RawDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements. Note that this may panic. + self.iter.drop_elements(); + + // Reset the contents of the table now that all elements have been + // dropped. + self.table.clear_no_drop(); + + // Move the now empty table back to its original location. + self.orig_table + .as_ptr() + .copy_from_nonoverlapping(&*self.table, 1); + } + } +} + +impl Iterator for RawDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { + let item = self.iter.next()?; + Some(item.read()) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawDrain<'_, T, A> {} +impl FusedIterator for RawDrain<'_, T, A> {} + +/// Iterator over occupied buckets that could match a given hash. +/// +/// `RawTable` only stores 7 bits of the hash value, so this iterator may return +/// items that have a hash value different than the one provided. You should +/// always validate the returned values before using them. 
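`RawDrain` moves the table into the iterator for the duration of the drain, so even a leaked drain leaves the table empty rather than full of logically removed entries. Assuming the fork keeps hashbrown's safe `drain()` constructor, the observable behaviour is roughly this (sketch, not code from the patch):

```rust
use crate::hashbrown::fork::raw::RawTable;

/// Sketch only; the identity "hash" mirrors the tests further down.
fn drain_all(mut table: RawTable<u64>) {
    let hasher = |v: &u64| *v;

    for v in 0..4u64 {
        table.insert(hasher(&v), v, hasher);
    }

    // Draining yields every element by value...
    let drained: Vec<u64> = table.drain().collect();
    assert_eq!(drained.len(), 4);

    // ...and the table keeps its storage but holds no elements.
    assert!(table.is_empty());
}
```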
+/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket. +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator. +/// - The order in which the iterator yields buckets is unspecified and may +/// change in the future. +pub struct RawIterHash { + inner: RawIterHashInner, + _marker: PhantomData, +} + +struct RawIterHashInner { + // See `RawTableInner`'s corresponding fields for details. + // We can't store a `*const RawTableInner` as it would get + // invalidated by the user calling `&mut` methods on `RawTable`. + bucket_mask: usize, + ctrl: NonNull, + + // The top 7 bits of the hash. + h2_hash: u8, + + // The sequence of groups to probe in the search. + probe_seq: ProbeSeq, + + group: Group, + + // The elements within the group with a matching h2-hash. + bitmask: BitMaskIter, +} + +impl RawIterHash { + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "raw")] + unsafe fn new(table: &RawTable, hash: u64) -> Self { + RawIterHash { + inner: RawIterHashInner::new(&table.table, hash), + _marker: PhantomData, + } + } +} +impl RawIterHashInner { + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "raw")] + unsafe fn new(table: &RawTableInner, hash: u64) -> Self { + let h2_hash = h2(hash); + let probe_seq = table.probe_seq(hash); + let group = Group::load(table.ctrl(probe_seq.pos)); + let bitmask = group.match_byte(h2_hash).into_iter(); + + RawIterHashInner { + bucket_mask: table.bucket_mask, + ctrl: table.ctrl, + h2_hash, + probe_seq, + group, + bitmask, + } + } +} + +impl Iterator for RawIterHash { + type Item = Bucket; + + fn next(&mut self) -> Option> { + unsafe { + match self.inner.next() { + Some(index) => { + // Can't use `RawTable::bucket` here as we don't have + // an actual `RawTable` reference to use. + debug_assert!(index <= self.inner.bucket_mask); + let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index); + Some(bucket) + } + None => None, + } + } + } +} + +impl Iterator for RawIterHashInner { + type Item = usize; + + fn next(&mut self) -> Option { + unsafe { + loop { + if let Some(bit) = self.bitmask.next() { + let index = (self.probe_seq.pos + bit) & self.bucket_mask; + return Some(index); + } + if likely(self.group.match_empty().any_bit_set()) { + return None; + } + self.probe_seq.move_next(self.bucket_mask); + + // Can't use `RawTableInner::ctrl` here as we don't have + // an actual `RawTableInner` reference to use. 
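`RawIterHashInner` only carries `h2_hash`, the top seven bits of the hash, because that is all a control byte stores; a `match_byte` hit is therefore just a candidate and the caller must still compare keys, as the doc comment above warns. A scalar restatement of the h1/h2 split used by this `raw` module (illustration, not code from the patch):

```rust
/// The low bits of the hash choose the starting probe position.
fn h1(hash: u64) -> usize {
    hash as usize
}

/// The top 7 bits of the hash are stored in the control byte. The high
/// bit stays clear, so a stored h2 always reads as a FULL control byte.
fn h2(hash: u64) -> u8 {
    let top7 = hash >> (64 - 7);
    (top7 & 0x7f) as u8
}

fn main() {
    let hash = 0xDEAD_BEEF_DEAD_BEEF_u64;
    assert!(h2(hash) < 0x80); // never collides with EMPTY (0xFF) or DELETED (0x80)
}
```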
+ let index = self.probe_seq.pos; + debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH); + let group_ctrl = self.ctrl.as_ptr().add(index); + + self.group = Group::load(group_ctrl); + self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); + } + } + } +} + +#[cfg(test)] +mod test_map { + use super::*; + + fn rehash_in_place(table: &mut RawTable, hasher: impl Fn(&T) -> u64) { + let hasher = infallible_hasher(hasher); + + unsafe { + into_ok(table.table.rehash_in_place( + &mut (), + &|table, cx, index| hasher(cx, table.bucket::(index).as_ref()), + mem::size_of::(), + if mem::needs_drop::() { + Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) + } else { + None + }, + )); + } + } + + #[test] + fn rehash() { + let mut table = RawTable::new(); + let hasher = |i: &u64| *i; + for i in 0..100 { + table.insert(i, i, hasher); + } + + for i in 0..100 { + unsafe { + assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); + } + assert!(table.find(i + 100, |x| *x == i + 100).is_none()); + } + + rehash_in_place(&mut table, hasher); + + for i in 0..100 { + unsafe { + assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); + } + assert!(table.find(i + 100, |x| *x == i + 100).is_none()); + } + } + + /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF + /// AN UNINITIALIZED TABLE DURING THE DROP + #[test] + fn test_drop_uninitialized() { + use ::alloc::vec::Vec; + + let table = unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap() + }; + drop(table); + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. + #[test] + fn test_drop_zero_items() { + use ::alloc::vec::Vec; + unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + let table = + RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap(); + + // WE SIMULATE, AS IT WERE, A FULL TABLE. + + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. + table + .table + .ctrl(0) + .write_bytes(EMPTY, table.table.num_ctrl_bytes()); + + // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets() + table.table.ctrl(0).write_bytes(0, table.capacity()); + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. 
+ if table.buckets() < Group::WIDTH { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets()); + } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH); + } + drop(table); + } + } +} diff --git a/crates/rune/src/hashbrown/fork/raw/neon.rs b/crates/rune/src/hashbrown/fork/raw/neon.rs new file mode 100644 index 000000000..44e82d57d --- /dev/null +++ b/crates/rune/src/hashbrown/fork/raw/neon.rs @@ -0,0 +1,124 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::arch::aarch64 as neon; +use core::mem; +use core::num::NonZeroU64; + +pub(crate) type BitMaskWord = u64; +pub(crate) type NonZeroBitMaskWord = NonZeroU64; +pub(crate) const BITMASK_STRIDE: usize = 8; +pub(crate) const BITMASK_MASK: BitMaskWord = !0; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080; + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a 64-bit NEON value. +#[derive(Copy, Clone)] +pub(crate) struct Group(neon::uint8x8_t); + +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { + #[repr(C)] + struct AlignedBytes { + _align: [Group; 0], + bytes: [u8; Group::WIDTH], + } + const ALIGNED_BYTES: AlignedBytes = AlignedBytes { + _align: [], + bytes: [EMPTY; Group::WIDTH], + }; + &ALIGNED_BYTES.bytes + } + + /// Loads a group of bytes starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const u8) -> Self { + Group(neon::vld1_u8(ptr)) + } + + /// Loads a group of bytes starting at the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(neon::vld1_u8(ptr)) + } + + /// Stores the group of bytes to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + neon::vst1_u8(ptr, self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which *may* + /// have the given value. + #[inline] + pub(crate) fn match_byte(self, byte: u8) -> BitMask { + unsafe { + let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(byte)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. 
+ #[inline] + pub(crate) fn match_empty(self) -> BitMask { + self.match_byte(EMPTY) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + unsafe { + let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. + #[inline] + pub(crate) fn match_full(self) -> BitMask { + unsafe { + let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + unsafe { + let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); + Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80))) + } + } +} diff --git a/crates/rune/src/hashbrown/fork/raw/sse2.rs b/crates/rune/src/hashbrown/fork/raw/sse2.rs new file mode 100644 index 000000000..956ba5d26 --- /dev/null +++ b/crates/rune/src/hashbrown/fork/raw/sse2.rs @@ -0,0 +1,149 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::mem; +use core::num::NonZeroU16; + +#[cfg(target_arch = "x86")] +use core::arch::x86; +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64 as x86; + +pub(crate) type BitMaskWord = u16; +pub(crate) type NonZeroBitMaskWord = NonZeroU16; +pub(crate) const BITMASK_STRIDE: usize = 1; +pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a 128-bit SSE value. +#[derive(Copy, Clone)] +pub(crate) struct Group(x86::__m128i); + +// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + #[allow(clippy::items_after_statements)] + pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { + #[repr(C)] + struct AlignedBytes { + _align: [Group; 0], + bytes: [u8; Group::WIDTH], + } + const ALIGNED_BYTES: AlignedBytes = AlignedBytes { + _align: [], + bytes: [EMPTY; Group::WIDTH], + }; + &ALIGNED_BYTES.bytes + } + + /// Loads a group of bytes starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const u8) -> Self { + Group(x86::_mm_loadu_si128(ptr.cast())) + } + + /// Loads a group of bytes starting at the given address, which must be + /// aligned to `mem::align_of::()`. 
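Both the NEON implementation above and the SSE2 one that follows lean on the same control-byte convention: `EMPTY = 0b1111_1111`, `DELETED = 0b1000_0000`, and a FULL byte holds a 7-bit h2 with the high bit clear. That is why "high bit set" can stand in for "empty or deleted". A byte-at-a-time restatement of the trick (illustration only):

```rust
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

/// FULL control bytes have the high bit clear.
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// What `convert_special_to_empty_and_full_to_deleted` does to one byte:
/// EMPTY => EMPTY, DELETED => EMPTY, FULL => DELETED.
fn convert(ctrl: u8) -> u8 {
    if is_full(ctrl) { DELETED } else { EMPTY }
}

fn main() {
    assert!(is_full(0x1f)); // a stored h2 value
    assert_eq!(convert(EMPTY), EMPTY);
    assert_eq!(convert(DELETED), EMPTY);
    assert_eq!(convert(0x1f), DELETED);
}
```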
+ #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(x86::_mm_load_si128(ptr.cast())) + } + + /// Stores the group of bytes to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + x86::_mm_store_si128(ptr.cast(), self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which have + /// the given value. + #[inline] + pub(crate) fn match_byte(self, byte: u8) -> BitMask { + #[allow( + clippy::cast_possible_wrap, // byte: u8 as i8 + // byte: i32 as u16 + // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the + // upper 16-bits of the i32 are zeroed: + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + unsafe { + let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8)); + BitMask(x86::_mm_movemask_epi8(cmp) as u16) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. + #[inline] + pub(crate) fn match_empty(self) -> BitMask { + self.match_byte(EMPTY) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + #[allow( + // byte: i32 as u16 + // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the + // upper 16-bits of the i32 are zeroed: + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + unsafe { + // A byte is EMPTY or DELETED iff the high bit is set + BitMask(x86::_mm_movemask_epi8(self.0) as u16) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. 
+ #[inline] + pub(crate) fn match_full(&self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + #[allow( + clippy::cast_possible_wrap, // byte: 0x80_u8 as i8 + )] + unsafe { + let zero = x86::_mm_setzero_si128(); + let special = x86::_mm_cmpgt_epi8(zero, self.0); + Group(x86::_mm_or_si128( + special, + x86::_mm_set1_epi8(0x80_u8 as i8), + )) + } + } +} diff --git a/crates/rune/src/hashbrown/fork/scopeguard.rs b/crates/rune/src/hashbrown/fork/scopeguard.rs new file mode 100644 index 000000000..47965a845 --- /dev/null +++ b/crates/rune/src/hashbrown/fork/scopeguard.rs @@ -0,0 +1,73 @@ +// Extracted from the scopeguard crate +use core::{ + mem::ManuallyDrop, + ops::{Deref, DerefMut}, + ptr, +}; + +pub struct ScopeGuard +where + F: FnMut(&mut T), +{ + dropfn: F, + value: T, +} + +#[inline] +pub fn guard(value: T, dropfn: F) -> ScopeGuard +where + F: FnMut(&mut T), +{ + ScopeGuard { dropfn, value } +} + +impl ScopeGuard +where + F: FnMut(&mut T), +{ + #[allow(dead_code)] + #[inline] + pub fn into_inner(guard: Self) -> T { + // Cannot move out of Drop-implementing types, so + // ptr::read the value out of a ManuallyDrop + // Don't use mem::forget as that might invalidate value + let guard = ManuallyDrop::new(guard); + unsafe { + let value = ptr::read(&guard.value); + // read the closure so that it is dropped + let _ = ptr::read(&guard.dropfn); + value + } + } +} + +impl Deref for ScopeGuard +where + F: FnMut(&mut T), +{ + type Target = T; + #[inline] + fn deref(&self) -> &T { + &self.value + } +} + +impl DerefMut for ScopeGuard +where + F: FnMut(&mut T), +{ + #[inline] + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } +} + +impl Drop for ScopeGuard +where + F: FnMut(&mut T), +{ + #[inline] + fn drop(&mut self) { + (self.dropfn)(&mut self.value); + } +} diff --git a/crates/rune/src/hashbrown/table.rs b/crates/rune/src/hashbrown/table.rs new file mode 100644 index 000000000..f699bc278 --- /dev/null +++ b/crates/rune/src/hashbrown/table.rs @@ -0,0 +1,272 @@ +use core::hash::BuildHasher; +use core::iter; +use core::marker::PhantomData; +use core::mem; +use core::ptr; + +use crate::hashbrown::fork::raw::{RawIter, RawTable}; +use std::collections::hash_map::{DefaultHasher, RandomState}; + +use crate::runtime::{Hasher, ProtocolCaller, RawRef, Ref, Value, VmError, VmResult}; + +#[derive(Clone)] +pub(crate) struct Table { + table: RawTable<(Value, V)>, + state: RandomState, +} + +impl Table { + #[inline(always)] + pub(crate) fn new() -> Self { + Self { + table: RawTable::new(), + state: RandomState::new(), + } + } + + #[inline(always)] + pub(crate) fn with_capacity(capacity: usize) -> Self { + Self { + table: RawTable::with_capacity(capacity), + state: RandomState::new(), + } + } + + #[inline(always)] + pub(crate) fn len(&self) -> usize { + self.table.len() + } + + #[inline(always)] + pub(crate) fn capacity(&self) -> usize { + self.table.capacity() + } + + #[inline(always)] + pub(crate) fn is_empty(&self) -> bool { + self.table.is_empty() + } + + 
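The vendored `scopeguard` above is what the resize and rehash paths use for panic safety: cleanup is attached to a value and runs when the guard is dropped, whatever exit path is taken. Its use boils down to this (sketch, assuming the vendored `guard` function is in scope):

```rust
fn demo() {
    let mut items = vec![1, 2, 3];

    {
        // The closure runs when the guard is dropped, even if the code in
        // between panics or returns early.
        let mut guarded = guard(&mut items, |v| v.clear());
        guarded.push(4); // Deref/DerefMut forward to the wrapped value.
    }

    // The guard has been dropped, so the cleanup closure has run.
    assert!(items.is_empty());
}
```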
#[inline(always)] + pub(crate) fn insert_with<P>
( + &mut self, + key: Value, + value: V, + caller: &mut P, + ) -> VmResult<Option<V>> + where + P: ?Sized + ProtocolCaller, + { + let hash = vm_try!(hash(&self.state, &key, caller)); + + let result = match self.table.find_or_find_insert_slot_with( + caller, + hash, + eq(&key), + hasher(&self.state), + ) { + Ok(result) => result, + Err(error) => return VmResult::Err(error), + }; + + let existing = match result { + Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().1 }, value)), + Err(slot) => { + unsafe { + self.table.insert_in_slot(hash, slot, (key, value)); + } + None + } + }; + + VmResult::Ok(existing) + } + + pub(crate) fn get<P>
(&self, key: &Value, caller: &mut P) -> VmResult<Option<&(Value, V)>> + where + P: ?Sized + ProtocolCaller, + { + if self.table.is_empty() { + return VmResult::Ok(None); + } + + let hash = vm_try!(hash(&self.state, key, caller)); + VmResult::Ok(vm_try!(self.table.get_with(caller, hash, eq(key)))) + } + + #[inline(always)] + pub(crate) fn remove_with<P>
(&mut self, key: &Value, caller: &mut P) -> VmResult> + where + P: ?Sized + ProtocolCaller, + { + let hash = vm_try!(hash(&self.state, key, caller)); + + match self.table.remove_entry_with(caller, hash, eq(key)) { + Ok(value) => VmResult::Ok(value.map(|(_, value)| value)), + Err(error) => VmResult::Err(error), + } + } + + #[inline(always)] + pub(crate) fn clear(&mut self) { + self.table.clear() + } + + pub(crate) fn iter(&self) -> Iter<'_, V> { + // SAFETY: lifetime is held by returned iterator. + let iter = unsafe { self.table.iter() }; + + Iter { + iter, + _marker: PhantomData, + } + } + + #[inline(always)] + pub(crate) fn iter_ref(this: Ref) -> IterRef { + let (this, _guard) = Ref::into_raw(this); + // SAFETY: Table will be alive and a reference to it held for as long as + // `RawRef` is alive. + let iter = unsafe { this.as_ref().table.iter() }; + IterRef { iter, _guard } + } + + #[inline(always)] + pub(crate) unsafe fn iter_ref_raw(this: ptr::NonNull>) -> RawIter<(Value, V)> { + this.as_ref().table.iter() + } + + #[inline(always)] + pub(crate) fn keys_ref(this: Ref) -> KeysRef { + let (this, _guard) = Ref::into_raw(this); + // SAFETY: Table will be alive and a reference to it held for as long as + // `RawRef` is alive. + let iter = unsafe { this.as_ref().table.iter() }; + KeysRef { iter, _guard } + } + + #[inline(always)] + pub(crate) fn values_ref(this: Ref) -> ValuesRef { + let (this, _guard) = Ref::into_raw(this); + // SAFETY: Table will be alive and a reference to it held for as long as + // `RawRef` is alive. + let iter = unsafe { this.as_ref().table.iter() }; + ValuesRef { iter, _guard } + } +} + +pub(crate) struct Iter<'a, V> { + iter: RawIter<(Value, V)>, + _marker: PhantomData<&'a V>, +} + +impl<'a, V> iter::Iterator for Iter<'a, V> { + type Item = &'a (Value, V); + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: we're still holding onto the `RawRef` guard. + unsafe { Some(self.iter.next()?.as_ref().clone()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +pub(crate) struct IterRef { + iter: RawIter<(Value, V)>, + _guard: RawRef, +} + +impl iter::Iterator for IterRef +where + V: Clone, +{ + type Item = (Value, V); + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: we're still holding onto the `RawRef` guard. + unsafe { Some(self.iter.next()?.as_ref().clone()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +pub(crate) struct KeysRef { + iter: RawIter<(Value, V)>, + _guard: RawRef, +} + +impl iter::Iterator for KeysRef { + type Item = Value; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: we're still holding onto the `RawRef` guard. + unsafe { Some(self.iter.next()?.as_ref().0.clone()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +pub(crate) struct ValuesRef { + iter: RawIter<(Value, V)>, + _guard: RawRef, +} + +impl iter::Iterator for ValuesRef +where + V: Clone, +{ + type Item = V; + + #[inline] + fn next(&mut self) -> Option { + // SAFETY: we're still holding onto the `RawRef` guard. + unsafe { Some(self.iter.next()?.as_ref().1.clone()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +/// Convenience function to hash a value. 
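`Table` is the piece that lets Rune values act as keys: it pairs the forked `RawTable<(Value, V)>` with a `RandomState` and funnels hashing and equality through a `ProtocolCaller`, so user-defined key types participate via their `HASH` and `EQ` protocol implementations. A rough, crate-internal sketch of the calling pattern (the real call sites are the `HashMap`/`HashSet` modules later in this patch):

```rust
use crate::hashbrown::Table;
use crate::runtime::{ProtocolCaller, Value, VmResult};

fn put_then_get<P>(key: Value, caller: &mut P) -> VmResult<Option<i64>>
where
    P: ?Sized + ProtocolCaller,
{
    let mut table: Table<i64> = Table::new();

    // Hashing `key` may call user code (the HASH protocol), so it is
    // fallible and threaded through the caller.
    vm_try!(table.insert_with(key.clone(), 42, caller));

    VmResult::Ok(vm_try!(table.get(&key, caller)).map(|(_, v)| *v))
}
```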
+fn hash(state: &S, value: &Value, caller: &mut impl ProtocolCaller) -> VmResult +where + S: BuildHasher, +{ + let mut hasher = Hasher::new_with(state); + vm_try!(value.hash_with(&mut hasher, caller)); + VmResult::Ok(hasher.finish()) +} + +/// Construct a hasher for a value in the table. +fn hasher(state: &S) -> impl Fn(&mut P, &(Value, V)) -> Result + '_ +where + P: ?Sized + ProtocolCaller, + S: BuildHasher, +{ + move |caller, (key, _): &(Value, V)| hash(state, key, caller).into_result() +} + +/// Construct an equality function for a value in the table that will compare an +/// entry with the current key. +fn eq(key: &Value) -> impl Fn(&mut P, &(Value, V)) -> Result + '_ +where + P: ?Sized + ProtocolCaller, +{ + move |caller: &mut P, (other, _): &(Value, V)| -> Result { + key.eq_with(other, caller).into_result() + } +} diff --git a/crates/rune/src/hir/arena.rs b/crates/rune/src/hir/arena.rs index a298c97d7..8c32dfd59 100644 --- a/crates/rune/src/hir/arena.rs +++ b/crates/rune/src/hir/arena.rs @@ -185,7 +185,9 @@ impl Arena { chunks.push(Chunk::new(new_cap)?); let Some(chunk) = chunks.last_mut() else { - return Err(ArenaAllocError { requested: additional }); + return Err(ArenaAllocError { + requested: additional, + }); }; let range = chunk.storage.as_mut_ptr_range(); diff --git a/crates/rune/src/hir/lowering.rs b/crates/rune/src/hir/lowering.rs index 3f038afdb..535a3aad6 100644 --- a/crates/rune/src/hir/lowering.rs +++ b/crates/rune/src/hir/lowering.rs @@ -166,7 +166,10 @@ pub(crate) fn async_block_secondary<'hir>( alloc_with!(cx, ast); let Some(captures) = cx.q.get_captures(captures) else { - return Err(compile::Error::msg(ast, format_args!("Missing captures for hash {captures}"))); + return Err(compile::Error::msg( + ast, + format_args!("Missing captures for hash {captures}"), + )); }; let captures = &*iter!(captures, |capture| { @@ -198,7 +201,10 @@ pub(crate) fn expr_closure_secondary<'hir>( alloc_with!(cx, ast); let Some(captures) = cx.q.get_captures(captures) else { - return Err(compile::Error::msg(ast, format_args!("Missing captures for hash {captures}"))); + return Err(compile::Error::msg( + ast, + format_args!("Missing captures for hash {captures}"), + )); }; let captures = &*iter!(captures, |capture| match capture { @@ -240,12 +246,10 @@ fn expr_call_closure<'hir>( ErrorKind::MissingItem { item: cx.q.pool.item(item.item).to_owned(), }, - )) + )); }; - let meta::Kind::Closure { - call, do_move, .. - } = meta.kind else { + let meta::Kind::Closure { call, do_move, .. 
} = meta.kind else { return Err(compile::Error::expected_meta( ast, meta.info(cx.q.pool), @@ -368,11 +372,13 @@ pub(crate) fn expr_object<'hir>( let assignments = &mut *iter!(&ast.assignments, |(ast, _)| { let key = object_key(cx, &ast.key)?; - if let Some(existing) = keys_dup.insert(key.1, key.0) { + if let Some(_existing) = keys_dup.insert(key.1, key.0) { return Err(compile::Error::new( key.0, ErrorKind::DuplicateObjectKey { - existing: existing.span(), + #[cfg(feature = "emit")] + existing: _existing.span(), + #[cfg(feature = "emit")] object: key.0.span(), }, )); @@ -382,9 +388,12 @@ pub(crate) fn expr_object<'hir>( Some((_, ast)) => expr(cx, ast)?, None => { let Some((name, _)) = cx.scopes.get(hir::Name::Str(key.1)) else { - return Err(compile::Error::new(key.0, ErrorKind::MissingLocal { - name: key.1.to_owned(), - },)) + return Err(compile::Error::new( + key.0, + ErrorKind::MissingLocal { + name: key.1.to_owned(), + }, + )); }; hir::Expr { @@ -829,7 +838,14 @@ pub(crate) fn expr_unary<'hir>( return Err(compile::Error::new(ast, ErrorKind::UnsupportedRef)); } - let (ast::UnOp::Neg(..), ast::Expr::Lit(ast::ExprLit { lit: ast::Lit::Number(n), .. })) = (ast.op, &*ast.expr) else { + let ( + ast::UnOp::Neg(..), + ast::Expr::Lit(ast::ExprLit { + lit: ast::Lit::Number(n), + .. + }), + ) = (ast.op, &*ast.expr) + else { return Ok(hir::ExprKind::Unary(alloc!(hir::ExprUnary { op: ast.op, expr: expr(cx, &ast.expr)?, @@ -959,7 +975,12 @@ fn expr_break<'hir>( let Some(drop) = cx.scopes.loop_drop(label) else { if let Some(label) = label { - return Err(compile::Error::new(ast, ErrorKind::MissingLoopLabel { label: label.into() })); + return Err(compile::Error::new( + ast, + ErrorKind::MissingLoopLabel { + label: label.into(), + }, + )); } else { return Err(compile::Error::new(ast, ErrorKind::BreakOutsideOfLoop)); } @@ -993,7 +1014,12 @@ fn expr_continue<'hir>( let Some(drop) = cx.scopes.loop_drop(label) else { if let Some(label) = label { - return Err(compile::Error::new(ast, ErrorKind::MissingLoopLabel { label: label.into() })); + return Err(compile::Error::new( + ast, + ErrorKind::MissingLoopLabel { + label: label.into(), + }, + )); } else { return Err(compile::Error::new(ast, ErrorKind::ContinueOutsideOfLoop)); } @@ -1182,11 +1208,13 @@ fn pat<'hir>(cx: &mut Ctxt<'hir, '_, '_>, ast: &ast::Pat) -> compile::Result(cx: &mut Ctxt<'hir, '_, '_>, ast: &ast::Pat) -> compile::Result(cx: &mut Ctxt<'hir, '_, '_>, ast: &ast::Pat) -> compile::Result( if let Some(ast::PathKind::SelfValue) = ast.as_kind() { let Some(..) 
= cx.scopes.get(hir::Name::SelfValue) else { - return Err(compile::Error::new( - ast, - ErrorKind::MissingSelf, - )); + return Err(compile::Error::new(ast, ErrorKind::MissingSelf)); }; return Ok(hir::ExprKind::Variable(hir::Name::SelfValue)); @@ -1561,10 +1589,7 @@ fn generics_parameters( for (s, _) in generics { let hir::ExprKind::Type(ty) = expr(cx, &s.expr)?.kind else { - return Err(compile::Error::new( - s, - ErrorKind::UnsupportedGenerics, - )); + return Err(compile::Error::new(s, ErrorKind::UnsupportedGenerics)); }; builder.add(ty.into_hash()); @@ -1752,10 +1777,7 @@ fn expr_field_access<'hir>( for (s, _) in generics { let hir::ExprKind::Type(ty) = expr(cx, &s.expr)?.kind else { - return Err(compile::Error::new( - s, - ErrorKind::UnsupportedGenerics, - )); + return Err(compile::Error::new(s, ErrorKind::UnsupportedGenerics)); }; builder.add(ty.into_hash()); diff --git a/crates/rune/src/indexing/index.rs b/crates/rune/src/indexing/index.rs index 0ffdf3586..e5d005f42 100644 --- a/crates/rune/src/indexing/index.rs +++ b/crates/rune/src/indexing/index.rs @@ -95,7 +95,9 @@ impl<'a, 'arena> Indexer<'a, 'arena> { p: &mut attrs::Parser, ast: &mut ast::MacroCall, ) -> compile::Result { - let Some((_, builtin)) = p.try_parse::(resolve_context!(self.q), &ast.attributes)? else { + let Some((_, builtin)) = + p.try_parse::(resolve_context!(self.q), &ast.attributes)? + else { return Ok(false); }; @@ -108,7 +110,7 @@ impl<'a, 'arena> Indexer<'a, 'arena> { ErrorKind::NoSuchBuiltInMacro { name: ast.path.resolve(resolve_context!(self.q))?, }, - )) + )); }; let ident = ident.resolve(resolve_context!(self.q))?; @@ -456,12 +458,13 @@ impl<'a, 'arena> Indexer<'a, 'arena> { .load(root, self.q.pool.module_item(mod_item), &*item_mod)?; if let Some(loaded) = self.loaded.as_mut() { - if let Some(existing) = loaded.insert(mod_item, (self.source_id, item_mod.span())) { + if let Some(_existing) = loaded.insert(mod_item, (self.source_id, item_mod.span())) { return Err(compile::Error::new( &*item_mod, ErrorKind::ModAlreadyLoaded { item: self.q.pool.module_item(mod_item).to_owned(), - existing, + #[cfg(feature = "emit")] + existing: _existing, }, )); } @@ -561,11 +564,11 @@ pub(crate) fn file(idx: &mut Indexer<'_, '_>, ast: &mut ast::File) -> compile::R // for the `item` handler or to be used by the macro_call expansion // below. if let Some(mut attr) = item.remove_first_attribute() { - let Some(file) = idx.expand_attribute_macro::(&mut attr, &mut item)? else { + let Some(file) = idx.expand_attribute_macro::(&mut attr, &mut item)? + else { skipped_attributes.push(attr); - if !matches!(item, ast::Item::MacroCall(_)) && item.attributes().is_empty() - { + if !matches!(item, ast::Item::MacroCall(_)) && item.attributes().is_empty() { // For all we know only non macro attributes remain, which will be // handled by the item handler. *item.attributes_mut() = skipped_attributes; @@ -597,7 +600,10 @@ pub(crate) fn file(idx: &mut Indexer<'_, '_>, ast: &mut ast::File) -> compile::R } let ast::Item::MacroCall(mut macro_call) = item else { - return Err(compile::Error::msg(&item, "Expected attributes on macro call")); + return Err(compile::Error::msg( + &item, + "Expected attributes on macro call", + )); }; macro_call.attributes = skipped_attributes; @@ -751,10 +757,13 @@ fn item_fn(idx: &mut Indexer<'_, '_>, mut ast: ast::ItemFn) -> compile::Result<( let is_test = match p.try_parse::(resolve_context!(idx.q), &ast.attributes)? 
{ Some((attr, _)) => { - if let Some(nested_span) = idx.nested_item { + if let Some(_nested_span) = idx.nested_item { return Err(compile::Error::new( attr, - ErrorKind::NestedTest { nested_span }, + ErrorKind::NestedTest { + #[cfg(feature = "emit")] + nested_span: _nested_span, + }, )); } @@ -765,12 +774,15 @@ fn item_fn(idx: &mut Indexer<'_, '_>, mut ast: ast::ItemFn) -> compile::Result<( let is_bench = match p.try_parse::(resolve_context!(idx.q), &ast.attributes)? { Some((attr, _)) => { - if let Some(nested_span) = idx.nested_item { + if let Some(_nested_span) = idx.nested_item { let span = attr.span().join(ast.descriptive_span()); return Err(compile::Error::new( span, - ErrorKind::NestedBench { nested_span }, + ErrorKind::NestedBench { + #[cfg(feature = "emit")] + nested_span: _nested_span, + }, )); } @@ -802,7 +814,10 @@ fn item_fn(idx: &mut Indexer<'_, '_>, mut ast: ast::ItemFn) -> compile::Result<( } let Some(impl_item) = idx.item.impl_item else { - return Err(compile::Error::new(&ast, ErrorKind::InstanceFunctionOutsideImpl)); + return Err(compile::Error::new( + &ast, + ErrorKind::InstanceFunctionOutsideImpl, + )); }; idx.q.index_and_build(indexing::Entry { @@ -932,6 +947,7 @@ fn statements(idx: &mut Indexer<'_, '_>, ast: &mut Vec) -> compile::R return Err(compile::Error::new( span, ErrorKind::ExpectedBlockSemiColon { + #[cfg(feature = "emit")] followed_span: stmt.span(), }, )); @@ -1466,7 +1482,10 @@ fn item_impl(idx: &mut Indexer<'_, '_>, mut ast: ast::ItemImpl) -> compile::Resu for path_segment in ast.path.as_components() { let Some(ident_segment) = path_segment.try_as_ident() else { - return Err(compile::Error::msg(path_segment, "Unsupported path segment")); + return Err(compile::Error::msg( + path_segment, + "Unsupported path segment", + )); }; let ident = ident_segment.resolve(resolve_context!(idx.q))?; @@ -1626,7 +1645,10 @@ fn item(idx: &mut Indexer<'_, '_>, ast: ast::Item) -> compile::Result<()> { } let Some(queue) = idx.queue.as_mut() else { - return Err(compile::Error::msg(&item_use, "Imports are not supported in this context")); + return Err(compile::Error::msg( + &item_use, + "Imports are not supported in this context", + )); }; let visibility = ast_to_visibility(&item_use.visibility)?; diff --git a/crates/rune/src/internal_macros.rs b/crates/rune/src/internal_macros.rs index 26839f5cc..77e71c03f 100644 --- a/crates/rune/src/internal_macros.rs +++ b/crates/rune/src/internal_macros.rs @@ -10,8 +10,8 @@ macro_rules! resolve_context { /// Build an implementation of `TypeOf` basic of a static type. macro_rules! impl_static_type { - (impl <$($p:ident),*> $ty:ty => $static_type:expr) => { - impl<$($p,)*> $crate::runtime::TypeOf for $ty { + (impl <$($p:ident),*> $ty:ty => $static_type:expr $(, where $($where:tt)+)?) => { + impl<$($p,)*> $crate::runtime::TypeOf for $ty $(where $($where)+)* { #[inline] fn type_hash() -> $crate::Hash { $static_type.hash @@ -23,7 +23,7 @@ macro_rules! impl_static_type { } } - impl<$($p,)*> $crate::runtime::MaybeTypeOf for $ty { + impl<$($p,)*> $crate::runtime::MaybeTypeOf for $ty $(where $($where)+)* { #[inline] fn maybe_type_of() -> Option<$crate::runtime::FullTypeOf> { Some(<$ty as $crate::runtime::TypeOf>::type_of()) @@ -119,7 +119,7 @@ macro_rules! from_value { let value = vm_try!(value.$into()); let value = vm_try!(value.into_ref()); let (value, guard) = $crate::runtime::Ref::into_raw(value); - $crate::runtime::VmResult::Ok((&*value, guard)) + $crate::runtime::VmResult::Ok((value.as_ref(), guard)) } } @@ -131,8 +131,8 @@ macro_rules! 
from_value { ) -> $crate::runtime::VmResult<(&'a mut Self, Self::Guard)> { let value = vm_try!(value.$into()); let value = vm_try!(value.into_mut()); - let (value, guard) = $crate::runtime::Mut::into_raw(value); - $crate::runtime::VmResult::Ok((&mut *value, guard)) + let (mut value, guard) = $crate::runtime::Mut::into_raw(value); + $crate::runtime::VmResult::Ok((value.as_mut(), guard)) } } diff --git a/crates/rune/src/languageserver/completion.rs b/crates/rune/src/languageserver/completion.rs index f0cfcec4b..65439cf05 100644 --- a/crates/rune/src/languageserver/completion.rs +++ b/crates/rune/src/languageserver/completion.rs @@ -24,8 +24,8 @@ pub(super) fn complete_for_unit( results: &mut Vec, ) { let Some(debug_info) = unit.debug_info() else { - return; - }; + return; + }; for (hash, function) in debug_info.functions.iter() { let func_name = function.to_string(); diff --git a/crates/rune/src/languageserver/state.rs b/crates/rune/src/languageserver/state.rs index 748a516b5..c9bfb1ecd 100644 --- a/crates/rune/src/languageserver/state.rs +++ b/crates/rune/src/languageserver/state.rs @@ -253,10 +253,14 @@ impl<'a> State<'a> { pub(super) fn format(&mut self, uri: &Url) -> Result> { let sources = &mut self.workspace.sources; tracing::trace!(uri = ?uri, uri_exists = sources.get(uri).is_some()); - let Some(workspace_source) = sources.get_mut(uri) else { return Ok(None); }; + let Some(workspace_source) = sources.get_mut(uri) else { + return Ok(None); + }; let source = workspace_source.content.to_string(); - let Ok(formatted) = crate::fmt::layout_source(&source) else { return Ok(None) }; + let Ok(formatted) = crate::fmt::layout_source(&source) else { + return Ok(None); + }; let formatted = String::from_utf8(formatted).context("format produced invalid utf8")?; // Only modify if changed @@ -525,7 +529,10 @@ fn emit_scripts(diagnostics: crate::Diagnostics, build: &Build, reporter: &mut R FatalDiagnosticKind::LinkError(e) => match e { LinkerError::MissingFunction { hash, spans } => { for (span, source_id) in spans { - let (Some(url), Some(source)) = (build.id_to_url.get(source_id), build.sources.get(*source_id)) else { + let (Some(url), Some(source)) = ( + build.id_to_url.get(source_id), + build.sources.get(*source_id), + ) else { continue; }; @@ -732,7 +739,10 @@ where { let span = error.span(); - let (Some(source), Some(url)) = (build.sources.get(source_id), build.id_to_url.get(&source_id)) else { + let (Some(source), Some(url)) = ( + build.sources.get(source_id), + build.id_to_url.get(&source_id), + ) else { return; }; diff --git a/crates/rune/src/lib.rs b/crates/rune/src/lib.rs index a30042ca1..8faf5da9f 100644 --- a/crates/rune/src/lib.rs +++ b/crates/rune/src/lib.rs @@ -5,7 +5,7 @@ //! docs.rs //! chat on discord //!
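The `impl_static_type!` change above adds an optional trailing `where` clause so the macro can also cover generic containers whose parameters need bounds. A hypothetical invocation showing the new arm being matched (the type, constant and bound names here are illustrative, not taken from the patch):

```rust
impl_static_type!(
    impl <C, B> ControlFlow<C, B> => crate::runtime::static_type::CONTROL_FLOW_TYPE,
    where C: MaybeTypeOf, B: MaybeTypeOf
);
```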
-//! Minimum support: Rust 1.67+. +//! Minimum support: Rust 1.70+. //!
//!
//! Visit the site 🌐 @@ -208,8 +208,8 @@ mod exported_macros; #[macro_use] pub mod ast; -#[cfg(feature = "fmt")] -pub mod fmt; +#[cfg(all(feature = "fmt", feature = "cli"))] +pub(crate) mod fmt; cfg_emit! { pub use ::codespan_reporting::term::termcolor; @@ -269,6 +269,9 @@ cfg_workspace! { pub mod workspace; } +#[cfg(feature = "std")] +mod hashbrown; + // Macros used internally and re-exported. pub(crate) use rune_macros::__internal_impl_any; @@ -279,10 +282,15 @@ pub(crate) use rune_macros::__internal_impl_any; /// generated Rune documentation. /// * The name of arguments is captured to improve documentation generation. /// * If an instance function is annotated this is detected (if the function -/// receives `self`). This behavior can be forced using `#[rune::function(instance)]` if -/// the function doesn't take `self`. -/// * The name of the function can be set using the `#[rune::function(path = ...)]`. -/// * Instance functions can be made a protocol function `#[rune::function(protocol = STRING_DISPLAY)]`. +/// receives `self`). This behavior can be forced using +/// `#[rune::function(instance)]` if the function doesn't take `self`. +/// * The name of the function can be set using the `#[rune::function(path = +/// name)]` argument. +/// * An associated function can be specified with the `#[rune::function(path = +/// Type::name)]` argument. If `instance` is specified it is an associated +/// instance function that can be defined externally. +/// * Instance functions can be made a protocol function +/// `#[rune::function(protocol = STRING_DISPLAY)]`. /// /// # Instance and associated functions /// @@ -371,7 +379,7 @@ pub(crate) use rune_macros::__internal_impl_any; /// } /// /// /// Construct a new [`Struct`]. -/// #[rune::function(path = Struct::new)] +/// #[rune::function(free, path = Struct::new)] /// fn new() -> Struct { /// Struct { /// /* .. */ @@ -382,6 +390,24 @@ pub(crate) use rune_macros::__internal_impl_any; /// The first part `Struct` in `Struct::new` is used to determine the type /// the function is associated with. /// +/// Protocol functions can either be defined in an impl block or externally. To +/// define a protocol externally, you can simply do this: +/// +/// ```rust +/// # use rune::Any; +/// # use rune::runtime::Formatter; +/// #[derive(Any)] +/// struct Struct { +/// /* .. */ +/// } +/// +/// #[rune::function(instance, protocol = STRING_DISPLAY)] +/// fn string_display(this: &Struct, f: &mut Formatter) -> std::fmt::Result { +/// /* .. */ +/// # todo!() +/// } +/// ``` +/// /// # Examples /// /// Defining and using a simple free function: @@ -485,7 +511,7 @@ pub(crate) use rune_macros::__internal_impl_any; /// /// let string = String::empty(); /// /// assert_eq!(string, "hello"); /// /// ``` -/// #[rune::function(path = String::empty)] +/// #[rune::function(free, path = String::empty)] /// fn empty() -> String { /// String { /// inner: std::string::String::new() diff --git a/crates/rune/src/module/module.rs b/crates/rune/src/module/module.rs index 09f7adaac..d98bd0ed7 100644 --- a/crates/rune/src/module/module.rs +++ b/crates/rune/src/module/module.rs @@ -241,7 +241,7 @@ impl Module { /// Accessor to modify type metadata such as documentaiton, fields, variants. pub fn type_meta(&mut self) -> Result, ContextError> where - T: Named + TypeOf, + T: ?Sized + Named + TypeOf, { let type_hash = T::type_hash(); @@ -955,7 +955,7 @@ impl Module { /// } /// /// /// Construct a new [`Struct`]. 
- /// #[rune::function(path = Struct::new)] + /// #[rune::function(free, path = Struct::new)] /// fn new() -> Struct { /// Struct { /// /* .. */ @@ -966,6 +966,24 @@ impl Module { /// The first part `Struct` in `Struct::new` is used to determine the type /// the function is associated with. /// + /// Protocol functions can either be defined in an impl block or externally. + /// To define a protocol externally, you can simply do this: + /// + /// ```rust + /// # use rune::Any; + /// # use rune::runtime::Formatter; + /// #[derive(Any)] + /// struct Struct { + /// /* .. */ + /// } + /// + /// #[rune::function(instance, protocol = STRING_DISPLAY)] + /// fn string_display(this: &Struct, f: &mut Formatter) -> std::fmt::Result { + /// /* .. */ + /// # todo!() + /// } + /// ``` + /// /// # Examples /// /// ``` diff --git a/crates/rune/src/modules.rs b/crates/rune/src/modules.rs index 9628e7a2a..372667d8d 100644 --- a/crates/rune/src/modules.rs +++ b/crates/rune/src/modules.rs @@ -18,6 +18,7 @@ pub mod f64; pub mod fmt; pub mod future; pub mod generator; +pub mod hash; pub mod i64; #[cfg(feature = "std")] pub mod io; diff --git a/crates/rune/src/modules/any.rs b/crates/rune/src/modules/any.rs index 2790da49c..c6d41ef7a 100644 --- a/crates/rune/src/modules/any.rs +++ b/crates/rune/src/modules/any.rs @@ -37,7 +37,7 @@ pub fn module() -> Result { /// let ty2 = Type::of_val(value2); /// assert_eq!(ty1, ty2); /// ``` -#[rune::function(path = Type::of_val)] +#[rune::function(free, path = Type::of_val)] #[inline] fn type_of_val(value: Value) -> VmResult { VmResult::Ok(Type::new(vm_try!(value.type_hash()))) diff --git a/crates/rune/src/modules/bytes.rs b/crates/rune/src/modules/bytes.rs index f8ca55b58..2331fd4b1 100644 --- a/crates/rune/src/modules/bytes.rs +++ b/crates/rune/src/modules/bytes.rs @@ -39,7 +39,7 @@ pub fn module() -> Result { /// let bytes = Bytes::new(); /// assert_eq!(bytes, b""); /// ``` -#[rune::function(path = Bytes::new)] +#[rune::function(free, path = Bytes::new)] #[inline] pub const fn new() -> Bytes { Bytes::new() @@ -55,7 +55,7 @@ pub const fn new() -> Bytes { /// bytes.extend(b"abcd"); /// assert_eq!(bytes, b"abcd"); /// ``` -#[rune::function(path = Bytes::with_capacity)] +#[rune::function(free, path = Bytes::with_capacity)] #[inline] pub fn with_capacity(capacity: usize) -> Bytes { Bytes::with_capacity(capacity) @@ -69,7 +69,7 @@ pub fn with_capacity(capacity: usize) -> Bytes { /// let bytes = Bytes::from_vec([b'a', b'b', b'c', b'd']); /// assert_eq!(bytes, b"abcd"); /// ``` -#[rune::function(path = Bytes::from_vec)] +#[rune::function(free, path = Bytes::from_vec)] #[inline] pub fn from_vec(bytes: Vec) -> Bytes { Bytes::from_vec(bytes) diff --git a/crates/rune/src/modules/collections.rs b/crates/rune/src/modules/collections.rs index 0dfb5b190..39c51d1fe 100644 --- a/crates/rune/src/modules/collections.rs +++ b/crates/rune/src/modules/collections.rs @@ -1,12 +1,16 @@ //! `std::collections` module. +#[cfg(feature = "std")] mod hash_map; +#[cfg(feature = "std")] mod hash_set; mod vec_deque; use crate::{ContextError, Module}; +#[cfg(feature = "std")] pub(crate) use self::hash_map::HashMap; +#[cfg(feature = "std")] pub(crate) use self::hash_set::HashSet; pub(crate) use self::vec_deque::VecDeque; use crate as rune; @@ -15,7 +19,9 @@ use crate as rune; /// The `std::collections` module. 
pub fn module() -> Result { let mut module = Module::from_meta(self::module_meta); + #[cfg(feature = "std")] hash_map::setup(&mut module)?; + #[cfg(feature = "std")] hash_set::setup(&mut module)?; vec_deque::setup(&mut module)?; Ok(module) diff --git a/crates/rune/src/modules/collections/hash_map.rs b/crates/rune/src/modules/collections/hash_map.rs index 95897276d..2f7c84b87 100644 --- a/crates/rune/src/modules/collections/hash_map.rs +++ b/crates/rune/src/modules/collections/hash_map.rs @@ -1,45 +1,44 @@ use core::fmt::{self, Write}; -use crate::no_std::collections; -use crate::no_std::prelude::*; - use crate as rune; +use crate::hashbrown::Table; use crate::runtime::{ - EnvProtocolCaller, Formatter, FromValue, Iterator, Key, ProtocolCaller, Value, VmErrorKind, + EnvProtocolCaller, Formatter, FromValue, Iterator, ProtocolCaller, Ref, Value, VmErrorKind, VmResult, }; use crate::{Any, ContextError, Module}; pub(super) fn setup(module: &mut Module) -> Result<(), ContextError> { module.ty::()?; - module.function_meta(HashMap::new)?; - module.function_meta(HashMap::with_capacity)?; - module.function_meta(HashMap::len)?; - module.function_meta(HashMap::insert)?; - module.function_meta(HashMap::get)?; - module.function_meta(HashMap::contains_key)?; - module.function_meta(HashMap::remove)?; - module.function_meta(HashMap::clear)?; - module.function_meta(HashMap::is_empty)?; - module.function_meta(HashMap::iter)?; - module.function_meta(HashMap::keys)?; - module.function_meta(HashMap::values)?; - module.function_meta(HashMap::extend)?; - module.function_meta(from)?; - module.function_meta(clone)?; - module.function_meta(HashMap::into_iter)?; - module.function_meta(HashMap::index_set)?; - module.function_meta(HashMap::index_get)?; - module.function_meta(HashMap::string_debug)?; - module.function_meta(HashMap::partial_eq)?; - module.function_meta(HashMap::eq)?; + module.function_meta(HashMap::new__meta)?; + module.function_meta(HashMap::with_capacity__meta)?; + module.function_meta(HashMap::len__meta)?; + module.function_meta(HashMap::capacity__meta)?; + module.function_meta(HashMap::insert__meta)?; + module.function_meta(HashMap::get__meta)?; + module.function_meta(HashMap::contains_key__meta)?; + module.function_meta(HashMap::remove__meta)?; + module.function_meta(HashMap::clear__meta)?; + module.function_meta(HashMap::is_empty__meta)?; + module.function_meta(HashMap::iter__meta)?; + module.function_meta(HashMap::keys__meta)?; + module.function_meta(HashMap::values__meta)?; + module.function_meta(HashMap::extend__meta)?; + module.function_meta(HashMap::from__meta)?; + module.function_meta(HashMap::clone__meta)?; + module.function_meta(HashMap::index_set__meta)?; + module.function_meta(HashMap::index_get__meta)?; + module.function_meta(HashMap::string_debug__meta)?; + module.function_meta(HashMap::partial_eq__meta)?; + module.function_meta(HashMap::eq__meta)?; + module.function_meta(HashMap::into_iter__meta)?; Ok(()) } #[derive(Any, Clone)] -#[rune(module = crate, item = ::std::collections)] +#[rune(item = ::std::collections)] pub(crate) struct HashMap { - map: collections::HashMap, + table: Table, } impl HashMap { @@ -54,10 +53,10 @@ impl HashMap { /// use std::collections::HashMap; /// let map = HashMap::new(); /// ``` - #[rune::function(path = Self::new)] + #[rune::function(keep, path = Self::new)] fn new() -> Self { Self { - map: collections::HashMap::new(), + table: Table::new(), } } @@ -73,10 +72,10 @@ impl HashMap { /// use std::collections::HashMap; /// let map = HashMap::with_capacity(10); 
/// ``` - #[rune::function(path = Self::with_capacity)] + #[rune::function(keep, path = Self::with_capacity)] fn with_capacity(capacity: usize) -> Self { Self { - map: collections::HashMap::with_capacity(capacity), + table: Table::with_capacity(capacity), } } @@ -92,9 +91,9 @@ impl HashMap { /// a.insert(1, "a"); /// assert_eq!(a.len(), 1); /// ``` - #[rune::function] + #[rune::function(keep)] fn len(&self) -> usize { - self.map.len() + self.table.len() } /// Returns the number of elements the map can hold without reallocating. @@ -109,9 +108,9 @@ impl HashMap { /// let map = HashMap::with_capacity(100); /// assert!(map.capacity() >= 100); /// ``` - #[rune::function] + #[rune::function(keep)] fn capacity(&self) -> usize { - self.map.capacity() + self.table.capacity() } /// Returns `true` if the map contains no elements. @@ -126,37 +125,9 @@ impl HashMap { /// a.insert(1, "a"); /// assert!(!a.is_empty()); /// ``` - #[rune::function] + #[rune::function(keep)] fn is_empty(&self) -> bool { - self.map.is_empty() - } - - /// An iterator visiting all key-value pairs in arbitrary order. - /// - /// # Examples - /// - /// ```rune - /// use std::collections::HashMap; - /// - /// let map = HashMap::from([ - /// ("a", 1), - /// ("b", 2), - /// ("c", 3), - /// ]); - /// - /// let pairs = map.iter().collect::(); - /// pairs.sort(); - /// assert_eq!(pairs, [("a", 1), ("b", 2), ("c", 3)]); - /// ``` - /// - /// # Performance - /// - /// In the current implementation, iterating over map takes O(capacity) time - /// instead of O(len) because it internally visits empty buckets too. - #[rune::function] - fn iter(&self) -> Iterator { - let iter = self.map.clone().into_iter(); - Iterator::from("std::collections::map::Iter", iter) + self.table.is_empty() } /// Inserts a key-value pair into the map. @@ -183,9 +154,10 @@ impl HashMap { /// assert_eq!(map.insert(37, "c"), Some("b")); /// assert_eq!(map[37], "c"); /// ``` - #[rune::function] - fn insert(&mut self, key: Key, value: Value) -> Option { - self.map.insert(key, value) + #[rune::function(keep)] + fn insert(&mut self, key: Value, value: Value) -> VmResult> { + let mut caller = EnvProtocolCaller; + self.table.insert_with(key, value, &mut caller) } /// Returns the value corresponding to the [`Key`]. @@ -200,9 +172,10 @@ impl HashMap { /// assert_eq!(map.get(1), Some("a")); /// assert_eq!(map.get(2), None); /// ``` - #[rune::function] - fn get(&self, key: Key) -> Option { - self.map.get(&key).cloned() + #[rune::function(keep)] + fn get(&self, key: Value) -> VmResult> { + let mut caller = EnvProtocolCaller; + VmResult::Ok(vm_try!(self.table.get(&key, &mut caller)).map(|(_, v)| v.clone())) } /// Returns `true` if the map contains a value for the specified [`Key`]. 
@@ -217,9 +190,10 @@ impl HashMap { /// assert_eq!(map.contains_key(1), true); /// assert_eq!(map.contains_key(2), false); /// ``` - #[rune::function] - fn contains_key(&self, key: Key) -> bool { - self.map.contains_key(&key) + #[rune::function(keep)] + fn contains_key(&self, key: Value) -> VmResult { + let mut caller = EnvProtocolCaller; + VmResult::Ok(vm_try!(self.table.get(&key, &mut caller)).is_some()) } /// Removes a key from the map, returning the value at the [`Key`] if the @@ -235,9 +209,10 @@ impl HashMap { /// assert_eq!(map.remove(1), Some("a")); /// assert_eq!(map.remove(1), None); /// ``` - #[rune::function] - fn remove(&mut self, key: Key) -> Option { - self.map.remove(&key) + #[rune::function(keep)] + fn remove(&mut self, key: Value) -> VmResult> { + let mut caller = EnvProtocolCaller; + self.table.remove_with(&key, &mut caller) } /// Clears the map, removing all key-value pairs. Keeps the allocated memory @@ -253,9 +228,37 @@ impl HashMap { /// a.clear(); /// assert!(a.is_empty()); /// ``` - #[rune::function] + #[rune::function(keep)] fn clear(&mut self) { - self.map.clear() + self.table.clear() + } + + /// An iterator visiting all key-value pairs in arbitrary order. + /// + /// # Examples + /// + /// ```rune + /// use std::collections::HashMap; + /// + /// let map = HashMap::from([ + /// ("a", 1), + /// ("b", 2), + /// ("c", 3), + /// ]); + /// + /// let pairs = map.iter().collect::(); + /// pairs.sort(); + /// assert_eq!(pairs, [("a", 1), ("b", 2), ("c", 3)]); + /// ``` + /// + /// # Performance + /// + /// In the current implementation, iterating over map takes O(capacity) time + /// instead of O(len) because it internally visits empty buckets too. + #[rune::function(keep, instance, path = Self::iter)] + fn iter(this: Ref) -> Iterator { + let iter = Table::iter_ref(Ref::map(this, |this| &this.table)); + Iterator::from("std::collections::hash_map::Iter", iter) } /// An iterator visiting all keys in arbitrary order. @@ -280,10 +283,10 @@ impl HashMap { /// /// In the current implementation, iterating over keys takes O(capacity) /// time instead of O(len) because it internally visits empty buckets too. - #[rune::function] - fn keys(&self) -> Iterator { - let iter = self.map.keys().cloned().collect::>().into_iter(); - Iterator::from("std::collections::map::Keys", iter) + #[rune::function(keep, instance, path = Self::keys)] + fn keys(this: Ref) -> Iterator { + let iter = Table::keys_ref(Ref::map(this, |this| &this.table)); + Iterator::from("std::collections::hash_map::Keys", iter) } /// An iterator visiting all values in arbitrary order. @@ -308,10 +311,11 @@ impl HashMap { /// /// In the current implementation, iterating over values takes O(capacity) /// time instead of O(len) because it internally visits empty buckets too. - #[rune::function] - fn values(&self) -> Iterator { - let iter = self.map.values().cloned().collect::>().into_iter(); - Iterator::from("std::collections::map::Values", iter) + #[rune::function(keep, instance, path = Self::values)] + fn values(this: Ref) -> Iterator { + let iter = Table::values_ref(Ref::map(this, |this| &this.table)); + + Iterator::from("std::collections::hash_map::Values", iter) } /// Extend this map from an iterator. 
@@ -329,59 +333,60 @@ impl HashMap { /// ("c", 3), /// ]); /// ``` - #[rune::function] + #[rune::function(keep)] fn extend(&mut self, value: Value) -> VmResult<()> { let mut it = vm_try!(value.into_iter()); while let Some(value) = vm_try!(it.next()) { - let (key, value) = vm_try!(<(Key, Value)>::from_value(value)); - self.map.insert(key, value); + let (key, value) = vm_try!(<(Value, Value)>::from_value(value)); + vm_try!(self.insert(key, value)); } VmResult::Ok(()) } - pub(crate) fn from_iter(mut it: Iterator) -> VmResult { - let mut map = collections::HashMap::new(); - - while let Some(value) = vm_try!(it.next()) { - let (key, value) = vm_try!(<(Key, Value)>::from_value(value)); - map.insert(key, value); - } - - VmResult::Ok(Self { map }) + /// Convert a hashmap from a `value`. + /// + /// The hashmap can be converted from anything that implements the [`INTO_ITER`] + /// protocol, and each item produces should be a tuple pair. + #[rune::function(keep, path = Self::from)] + fn from(value: Value) -> VmResult { + let mut caller = EnvProtocolCaller; + HashMap::from_iter(vm_try!(value.into_iter()), &mut caller) } - /// An iterator visiting all key-value pairs in arbitrary order. + /// Clone the map. /// /// # Examples /// /// ```rune /// use std::collections::HashMap; /// - /// let map = HashMap::from([ - /// ("a", 1), - /// ("b", 2), - /// ("c", 3), - /// ]); - /// - /// let pairs = []; + /// let a = HashMap::from([("a", 1), ("b", 2)]); + /// let b = a.clone(); /// - /// for pair in map { - /// pairs.push(pair); - /// } + /// b.insert("c", 3); /// - /// pairs.sort(); - /// assert_eq!(pairs, [("a", 1), ("b", 2), ("c", 3)]); + /// assert_eq!(a.len(), 2); + /// assert_eq!(b.len(), 3); /// ``` - /// - /// # Performance - /// - /// In the current implementation, iterating over map takes O(capacity) time - /// instead of O(len) because it internally visits empty buckets too. - #[rune::function(protocol = INTO_ITER)] - fn into_iter(&self) -> Iterator { - self.__rune_fn__iter() + #[rune::function(keep, instance, path = Self::clone)] + fn clone(this: &HashMap) -> HashMap { + Clone::clone(this) + } + + pub(crate) fn from_iter
<P>
(mut it: Iterator, caller: &mut P) -> VmResult + where + P: ?Sized + ProtocolCaller, + { + let mut map = Self::new(); + + while let Some(value) = vm_try!(it.next()) { + let (key, value) = vm_try!(<(Value, Value)>::from_value(value)); + vm_try!(map.table.insert_with(key, value, caller)); + } + + VmResult::Ok(map) } /// Inserts a key-value pair into the map. @@ -402,9 +407,10 @@ impl HashMap { /// map[37] = "c"; /// assert_eq!(map[37], "c"); /// ``` - #[rune::function(protocol = INDEX_SET)] - fn index_set(&mut self, key: Key, value: Value) { - let _ = self.map.insert(key, value); + #[rune::function(keep, protocol = INDEX_SET)] + fn index_set(&mut self, key: Value, value: Value) -> VmResult<()> { + let _ = vm_try!(self.insert(key, value)); + VmResult::Ok(()) } /// Returns a the value corresponding to the key. @@ -429,16 +435,17 @@ impl HashMap { /// map[1] = "a"; /// assert_eq!(map[1], "a"); /// ``` - #[rune::function(protocol = INDEX_GET)] - fn index_get(&self, key: Key) -> VmResult { + #[rune::function(keep, protocol = INDEX_GET)] + fn index_get(&self, key: Value) -> VmResult { use crate::runtime::TypeOf; - let value = vm_try!(self.map.get(&key).ok_or_else(|| { - VmErrorKind::MissingIndexKey { + let mut caller = EnvProtocolCaller; + + let Some((_, value)) = vm_try!(self.table.get(&key, &mut caller)) else { + return VmResult::err(VmErrorKind::MissingIndexKey { target: Self::type_info(), - index: key, - } - })); + }); + }; VmResult::Ok(value.clone()) } @@ -455,7 +462,7 @@ impl HashMap { /// /// assert_eq!(format!("{:?}", map), "{1: \"a\"}"); /// ``` - #[rune::function(protocol = STRING_DEBUG)] + #[rune::function(keep, protocol = STRING_DEBUG)] fn string_debug(&self, f: &mut Formatter) -> VmResult { self.string_debug_with(f, &mut EnvProtocolCaller) } @@ -467,10 +474,14 @@ impl HashMap { ) -> VmResult { vm_write!(f, "{{"); - let mut it = self.map.iter().peekable(); + let mut it = self.table.iter().peekable(); while let Some((key, value)) = it.next() { - vm_write!(f, "{:?}: ", key); + if let Err(fmt::Error) = vm_try!(key.string_debug_with(f, caller)) { + return VmResult::Ok(Err(fmt::Error)); + } + + vm_write!(f, ": "); if let Err(fmt::Error) = vm_try!(value.string_debug_with(f, caller)) { return VmResult::Ok(Err(fmt::Error)); @@ -511,26 +522,22 @@ impl HashMap { /// /// assert!(map1 != map2); /// ``` - #[rune::function(protocol = PARTIAL_EQ)] + #[rune::function(keep, protocol = PARTIAL_EQ)] fn partial_eq(&self, other: &Self) -> VmResult { self.partial_eq_with(other, &mut EnvProtocolCaller) } - pub(crate) fn partial_eq_with( - &self, - other: &Self, - caller: &mut impl ProtocolCaller, - ) -> VmResult { - if self.map.len() != other.map.len() { + fn partial_eq_with(&self, other: &Self, caller: &mut impl ProtocolCaller) -> VmResult { + if self.table.len() != other.table.len() { return VmResult::Ok(false); } - for (k, v) in self.map.iter() { - let Some(v2) = other.map.get(k) else { + for (k, v1) in self.table.iter() { + let Some((_, v2)) = vm_try!(other.table.get(k, caller)) else { return VmResult::Ok(false); }; - if !vm_try!(Value::partial_eq_with(v, v2, caller)) { + if !vm_try!(Value::partial_eq_with(v1, v2, caller)) { return VmResult::Ok(false); } } @@ -560,55 +567,58 @@ impl HashMap { /// /// assert!(eq(map1, map2)); /// ``` - #[rune::function(protocol = EQ)] + #[rune::function(keep, protocol = EQ)] fn eq(&self, other: &Self) -> VmResult { self.eq_with(other, &mut EnvProtocolCaller) } fn eq_with(&self, other: &Self, caller: &mut EnvProtocolCaller) -> VmResult { - if self.map.len() != 
other.map.len() { + if self.table.len() != other.table.len() { return VmResult::Ok(false); } - for (k, v) in self.map.iter() { - let Some(v2) = other.map.get(k) else { + for (k, v1) in self.table.iter() { + let Some((_, v2)) = vm_try!(other.table.get(k, caller)) else { return VmResult::Ok(false); }; - if !vm_try!(Value::eq_with(v, v2, caller)) { + if !vm_try!(Value::eq_with(v1, v2, caller)) { return VmResult::Ok(false); } } VmResult::Ok(true) } -} -/// Convert a hashmap from a `value`. -/// -/// The hashmap can be converted from anything that implements the [`INTO_ITER`] -/// protocol, and each item produces should be a tuple pair. -#[rune::function(path = HashMap::from)] -fn from(value: Value) -> VmResult { - HashMap::from_iter(vm_try!(value.into_iter())) -} - -/// Clone the map. -/// -/// # Examples -/// -/// ```rune -/// use std::collections::HashMap; -/// -/// let a = HashMap::from([("a", 1), ("b", 2)]); -/// let b = a.clone(); -/// -/// b.insert("c", 3); -/// -/// assert_eq!(a.len(), 2); -/// assert_eq!(b.len(), 3); -/// ``` -#[rune::function(instance)] -fn clone(this: &HashMap) -> HashMap { - this.clone() + /// An iterator visiting all key-value pairs in arbitrary order. + /// + /// # Examples + /// + /// ```rune + /// use std::collections::HashMap; + /// + /// let map = HashMap::from([ + /// ("a", 1), + /// ("b", 2), + /// ("c", 3), + /// ]); + /// + /// let pairs = []; + /// + /// for pair in map { + /// pairs.push(pair); + /// } + /// + /// pairs.sort(); + /// assert_eq!(pairs, [("a", 1), ("b", 2), ("c", 3)]); + /// ``` + /// + /// # Performance + /// + /// In the current implementation, iterating over map takes O(capacity) time + /// instead of O(len) because it internally visits empty buckets too. + #[rune::function(keep, instance, protocol = INTO_ITER, path = Self)] + fn into_iter(this: Ref) -> Iterator { + Self::iter(this) + } } diff --git a/crates/rune/src/modules/collections/hash_set.rs b/crates/rune/src/modules/collections/hash_set.rs index 7de74ea43..6d6e5c932 100644 --- a/crates/rune/src/modules/collections/hash_set.rs +++ b/crates/rune/src/modules/collections/hash_set.rs @@ -1,44 +1,44 @@ use core::fmt::{self, Write}; use core::iter; +use core::ptr; use crate as rune; -use crate::no_std::collections; +use crate::hashbrown::{IterRef, RawIter, Table}; use crate::runtime::{ - EnvProtocolCaller, Formatter, Iterator, IteratorTrait, Key, ProtocolCaller, Ref, Value, - VmResult, + EnvProtocolCaller, Formatter, Iterator, ProtocolCaller, RawRef, Ref, Value, VmResult, }; use crate::{Any, ContextError, Module}; pub(super) fn setup(module: &mut Module) -> Result<(), ContextError> { module.ty::()?; - module.function_meta(HashSet::new)?; - module.function_meta(HashSet::with_capacity)?; - module.function_meta(HashSet::len)?; - module.function_meta(HashSet::is_empty)?; - module.function_meta(HashSet::capacity)?; - module.function_meta(HashSet::insert)?; - module.function_meta(HashSet::remove)?; - module.function_meta(HashSet::contains)?; - module.function_meta(HashSet::clear)?; - module.function_meta(HashSet::difference)?; - module.function_meta(HashSet::extend)?; - module.function_meta(HashSet::intersection)?; - module.function_meta(HashSet::union)?; - module.function_meta(HashSet::iter)?; - module.function_meta(clone)?; - module.function_meta(from)?; - module.function_meta(HashSet::into_iter)?; - module.function_meta(HashSet::string_debug)?; - module.function_meta(HashSet::partial_eq)?; - module.function_meta(HashSet::eq)?; + module.function_meta(HashSet::new__meta)?; + 
module.function_meta(HashSet::with_capacity__meta)?; + module.function_meta(HashSet::len__meta)?; + module.function_meta(HashSet::is_empty__meta)?; + module.function_meta(HashSet::capacity__meta)?; + module.function_meta(HashSet::insert__meta)?; + module.function_meta(HashSet::remove__meta)?; + module.function_meta(HashSet::contains__meta)?; + module.function_meta(HashSet::clear__meta)?; + module.function_meta(HashSet::difference__meta)?; + module.function_meta(HashSet::extend__meta)?; + module.function_meta(HashSet::intersection__meta)?; + module.function_meta(HashSet::union__meta)?; + module.function_meta(HashSet::iter__meta)?; + module.function_meta(HashSet::into_iter__meta)?; + module.function_meta(HashSet::string_debug__meta)?; + module.function_meta(HashSet::partial_eq__meta)?; + module.function_meta(HashSet::eq__meta)?; + module.function_meta(HashSet::clone__meta)?; + module.function_meta(HashSet::from__meta)?; Ok(()) } #[derive(Any, Clone)] #[rune(module = crate, item = ::std::collections)] pub(crate) struct HashSet { - set: collections::HashSet, + table: Table<()>, } impl HashSet { @@ -54,10 +54,10 @@ impl HashSet { /// /// let set = HashSet::new(); /// ``` - #[rune::function(path = Self::new)] + #[rune::function(keep, path = Self::new)] fn new() -> Self { Self { - set: collections::HashSet::new(), + table: Table::new(), } } @@ -75,10 +75,10 @@ impl HashSet { /// let set = HashSet::with_capacity(10); /// assert!(set.capacity() >= 10); /// ``` - #[rune::function(path = Self::with_capacity)] + #[rune::function(keep, path = Self::with_capacity)] fn with_capacity(capacity: usize) -> Self { Self { - set: collections::HashSet::with_capacity(capacity), + table: Table::with_capacity(capacity), } } @@ -94,9 +94,9 @@ impl HashSet { /// v.insert(1); /// assert_eq!(v.len(), 1); /// ``` - #[rune::function] + #[rune::function(keep)] fn len(&self) -> usize { - self.set.len() + self.table.len() } /// Returns `true` if the set contains no elements. @@ -111,9 +111,9 @@ impl HashSet { /// v.insert(1); /// assert!(!v.is_empty()); /// ``` - #[rune::function] + #[rune::function(keep)] fn is_empty(&self) -> bool { - self.set.is_empty() + self.table.is_empty() } /// Returns the number of elements the set can hold without reallocating. @@ -126,9 +126,9 @@ impl HashSet { /// let set = HashSet::with_capacity(100); /// assert!(set.capacity() >= 100); /// ``` - #[rune::function] + #[rune::function(keep)] fn capacity(&self) -> usize { - self.set.capacity() + self.table.capacity() } /// Adds a value to the set. @@ -149,9 +149,10 @@ impl HashSet { /// assert_eq!(set.insert(2), false); /// assert_eq!(set.len(), 1); /// ``` - #[rune::function] - fn insert(&mut self, key: Key) -> bool { - self.set.insert(key) + #[rune::function(keep)] + fn insert(&mut self, key: Value) -> VmResult { + let mut caller = EnvProtocolCaller; + VmResult::Ok(vm_try!(self.table.insert_with(key, (), &mut caller)).is_none()) } /// Removes a value from the set. Returns whether the value was present in @@ -168,9 +169,10 @@ impl HashSet { /// assert_eq!(set.remove(2), true); /// assert_eq!(set.remove(2), false); /// ``` - #[rune::function] - fn remove(&mut self, key: Key) -> bool { - self.set.remove(&key) + #[rune::function(keep)] + fn remove(&mut self, key: Value) -> VmResult { + let mut caller = EnvProtocolCaller; + VmResult::Ok(vm_try!(self.table.remove_with(&key, &mut caller)).is_some()) } /// Returns `true` if the set contains a value. 
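The same protocol-based keying applies to `HashSet`: members are plain `Value`s, so composite values can be stored directly. A small Rune sketch, assuming tuples participate in the `HASH`/`EQ` protocols added by this patch:

```rune
use std::collections::HashSet;

let set = HashSet::new();

// Composite members are hashed through the HASH protocol.
set.insert((1, 2));
set.insert((3, 4));

assert!(set.contains((1, 2)));
assert!(!set.contains((2, 1)));
assert_eq!(set.len(), 2);
```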
@@ -184,9 +186,10 @@ impl HashSet { /// assert_eq!(set.contains(1), true); /// assert_eq!(set.contains(4), false); /// ``` - #[rune::function] - fn contains(&self, key: Key) -> bool { - self.set.contains(&key) + #[rune::function(keep)] + fn contains(&self, key: Value) -> VmResult { + let mut caller = EnvProtocolCaller; + VmResult::Ok(vm_try!(self.table.get(&key, &mut caller)).is_some()) } /// Clears the set, removing all values. @@ -201,9 +204,9 @@ impl HashSet { /// v.clear(); /// assert!(v.is_empty()); /// ``` - #[rune::function] + #[rune::function(keep)] fn clear(&mut self) { - self.set.clear() + self.table.clear() } /// Visits the values representing the difference, i.e., the values that are @@ -225,15 +228,17 @@ impl HashSet { /// let diff = b.difference(a).collect::(); /// assert_eq!(diff, [4].iter().collect::()); /// ``` - #[rune::function(instance, path = Self::difference)] + #[rune::function(keep, instance, path = Self::difference)] fn difference(this: Ref, other: Ref) -> Iterator { - Iterator::from( - "std::collections::set::Difference", - Difference { - this: this.set.clone().into_iter(), - other: Some(other), - }, - ) + let iter = Self::difference_inner(this, other); + Iterator::from("std::collections::hash_set::Difference", iter) + } + + fn difference_inner(this: Ref, other: Ref) -> Difference { + Difference { + this: Table::iter_ref(Ref::map(this, |this| &this.table)), + other: Some(other), + } } /// Visits the values representing the intersection, i.e., the values that @@ -253,22 +258,22 @@ impl HashSet { /// let values = a.intersection(b).collect::(); /// assert_eq!(values, [2, 3].iter().collect::()); /// ``` - #[rune::function(instance, path = Self::intersection)] + #[rune::function(keep, instance, path = Self::intersection)] fn intersection(this: Ref, other: Ref) -> Iterator { // use shortest iterator as driver for intersections - let intersection = if this.set.len() <= other.set.len() { + let iter = if this.table.len() <= other.table.len() { Intersection { - this: this.set.clone().into_iter(), + this: Table::iter_ref(Ref::map(this, |this| &this.table)), other: Some(other), } } else { Intersection { - this: other.set.clone().into_iter(), + this: Table::iter_ref(Ref::map(other, |this| &this.table)), other: Some(this), } }; - Iterator::from("std::collections::set::Intersection", intersection) + Iterator::from("std::collections::hash_set::Intersection", iter) } /// Visits the values representing the union, i.e., all the values in `self` @@ -284,21 +289,41 @@ impl HashSet { /// /// let union = a.union(b).collect::(); /// assert_eq!(union, HashSet::from([1, 2, 3, 4])); + /// + /// let union = b.union(a).collect::(); + /// assert_eq!(union, HashSet::from([1, 2, 3, 4])); /// ``` - #[rune::function(instance, path = Self::union)] + #[rune::function(keep, instance, path = Self::union)] fn union(this: Ref, other: Ref) -> VmResult { - // use longest as lead and then append any missing that are in second - let iter = Union { - iter: if this.set.len() >= other.set.len() { - vm_try!(HashSet::__rune_fn__iter(&this) - .chain_raw(HashSet::__rune_fn__difference(other, this))) + unsafe { + let (this, this_guard) = Ref::into_raw(Ref::map(this, |this| &this.table)); + let (other, other_guard) = Ref::into_raw(Ref::map(other, |this| &this.table)); + + // use longest as lead and then append any missing that are in second + let iter = if this.as_ref().len() >= other.as_ref().len() { + let this_iter = Table::iter_ref_raw(this); + let other_iter = Table::iter_ref_raw(other); + + Union { + this, + 
this_iter, + other_iter, + _guards: (this_guard, other_guard), + } } else { - vm_try!(HashSet::__rune_fn__iter(&other) - .chain_raw(HashSet::__rune_fn__difference(this, other))) - }, - }; - - VmResult::Ok(Iterator::from("std::collections::set::Union", iter)) + let this_iter = Table::iter_ref_raw(other); + let other_iter = Table::iter_ref_raw(this); + + Union { + this: other, + this_iter, + other_iter, + _guards: (other_guard, this_guard), + } + }; + + VmResult::Ok(Iterator::from("std::collections::hash_set::Union", iter)) + } } /// Iterate over the hash set. @@ -313,20 +338,24 @@ impl HashSet { /// vec.sort(); /// assert_eq!(vec, [1, 2, 3]); /// ``` - #[rune::function] - fn iter(&self) -> Iterator { - let iter = self.set.clone().into_iter(); - Iterator::from("std::collections::set::Iter", iter) + #[rune::function(keep, instance, path = Self::iter)] + fn iter(this: Ref) -> VmResult { + let iter = Self::iter_inner(this); + VmResult::Ok(Iterator::from("std::collections::hash_set::Iter", iter)) + } + + fn iter_inner(this: Ref) -> impl iter::Iterator { + Table::iter_ref(Ref::map(this, |this| &this.table)).map(|(key, ())| key) } /// Extend this set from an iterator. - #[rune::function] + #[rune::function(keep)] fn extend(&mut self, value: Value) -> VmResult<()> { + let mut caller = EnvProtocolCaller; let mut it = vm_try!(value.into_iter()); - while let Some(value) = vm_try!(it.next()) { - let key = vm_try!(Key::from_value(&value)); - self.set.insert(key); + while let Some(key) = vm_try!(it.next()) { + vm_try!(self.table.insert_with(key, (), &mut caller)); } VmResult::Ok(()) @@ -349,9 +378,9 @@ impl HashSet { /// vec.sort(); /// assert_eq!(vec, [1, 2, 3]); /// ``` - #[rune::function(protocol = INTO_ITER)] - fn into_iter(&self) -> Iterator { - self.__rune_fn__iter() + #[rune::function(keep, instance, protocol = INTO_ITER, path = Self)] + fn into_iter(this: Ref) -> VmResult { + Self::iter(this) } /// Write a debug representation to a string. @@ -367,7 +396,7 @@ impl HashSet { /// let set = HashSet::from([1, 2, 3]); /// println!("{:?}", set); /// ``` - #[rune::function(protocol = STRING_DEBUG)] + #[rune::function(keep, protocol = STRING_DEBUG)] fn string_debug(&self, f: &mut Formatter) -> VmResult { self.string_debug_with(f, &mut EnvProtocolCaller) } @@ -379,7 +408,7 @@ impl HashSet { ) -> VmResult { vm_write!(f, "{{"); - let mut it = self.set.iter().peekable(); + let mut it = self.table.iter().peekable(); while let Some(value) = it.next() { vm_write!(f, "{:?}", value); @@ -393,14 +422,17 @@ impl HashSet { VmResult::Ok(Ok(())) } - pub(crate) fn from_iter(mut it: Iterator) -> VmResult { - let mut set = collections::HashSet::with_capacity(it.size_hint().0); + pub(crate) fn from_iter
<P>
(mut it: Iterator, caller: &mut P) -> VmResult + where + P: ?Sized + ProtocolCaller, + { + let mut set = Table::with_capacity(it.size_hint().0); - while let Some(value) = vm_try!(it.next()) { - set.insert(vm_try!(Key::from_value(&value))); + while let Some(key) = vm_try!(it.next()) { + vm_try!(set.insert_with(key, (), caller)); } - VmResult::Ok(HashSet { set }) + VmResult::Ok(HashSet { table: set }) } /// Perform a partial equality test between two sets. @@ -416,9 +448,9 @@ impl HashSet { /// assert_eq!(set, HashSet::from([1, 2, 3])); /// assert_ne!(set, HashSet::from([2, 3, 4])); /// ``` - #[rune::function(protocol = PARTIAL_EQ)] - fn partial_eq(&self, other: &Self) -> bool { - self.set == other.set + #[rune::function(keep, protocol = PARTIAL_EQ)] + fn partial_eq(&self, other: &Self) -> VmResult { + self.eq_with(other, &mut EnvProtocolCaller) } /// Perform a total equality test between two sets. @@ -433,36 +465,62 @@ impl HashSet { /// assert!(eq(set, HashSet::from([1, 2, 3]))); /// assert!(!eq(set, HashSet::from([2, 3, 4]))); /// ``` - #[rune::function(protocol = EQ)] - fn eq(&self, other: &Self) -> bool { - self.set == other.set + #[rune::function(keep, protocol = EQ)] + fn eq(&self, other: &Self) -> VmResult { + self.eq_with(other, &mut EnvProtocolCaller) + } + + fn eq_with(&self, other: &Self, caller: &mut EnvProtocolCaller) -> VmResult { + if self.table.len() != other.table.len() { + return VmResult::Ok(false); + } + + for (key, ()) in self.table.iter() { + if vm_try!(other.table.get(key, caller)).is_none() { + return VmResult::Ok(false); + } + } + + VmResult::Ok(true) + } + + #[rune::function(keep, path = Self::from)] + fn from(value: Value) -> VmResult { + let mut caller = EnvProtocolCaller; + HashSet::from_iter(vm_try!(value.into_iter()), &mut caller) + } + + #[rune::function(keep, instance, path = Self::clone)] + fn clone(this: &HashSet) -> HashSet { + this.clone() } } -struct Intersection -where - I: iter::Iterator, -{ - this: I, +struct Intersection { + this: IterRef<()>, other: Option>, } -impl iter::Iterator for Intersection -where - I: iter::Iterator, -{ - type Item = Key; +impl iter::Iterator for Intersection { + type Item = VmResult; + fn next(&mut self) -> Option { - let other = self.other.take()?; + let mut caller = EnvProtocolCaller; + let other = self.other.as_ref()?; - loop { - let item = self.this.next()?; + for (key, ()) in self.this.by_ref() { + let c = match other.table.get(&key, &mut caller) { + VmResult::Ok(c) => c.is_some(), + VmResult::Err(e) => return Some(VmResult::Err(e)), + }; - if other.set.contains(&item) { - self.other = Some(other); - return Some(item); + if c { + return Some(VmResult::Ok(key)); } } + + self.other = None; + None } #[inline] @@ -472,31 +530,31 @@ where } } -struct Difference -where - I: iter::Iterator, -{ - this: I, +struct Difference { + this: IterRef<()>, other: Option>, } -impl iter::Iterator for Difference -where - I: iter::Iterator, -{ - type Item = Key; +impl iter::Iterator for Difference { + type Item = VmResult; fn next(&mut self) -> Option { - let other = self.other.take()?; + let mut caller = EnvProtocolCaller; + let other = self.other.as_ref()?; - loop { - let item = self.this.next()?; + for (key, ()) in self.this.by_ref() { + let c = match other.table.get(&key, &mut caller) { + VmResult::Ok(c) => c.is_some(), + VmResult::Err(e) => return Some(VmResult::Err(e)), + }; - if !other.set.contains(&item) { - self.other = Some(other); - return Some(item); + if !c { + return Some(VmResult::Ok(key)); } } + + self.other = None; + None 
} #[inline] @@ -507,25 +565,37 @@ where } struct Union { - iter: Iterator, + this: ptr::NonNull>, + this_iter: RawIter<(Value, ())>, + other_iter: RawIter<(Value, ())>, + _guards: (RawRef, RawRef), } -impl IteratorTrait for Union { - fn next(&mut self) -> VmResult> { - self.iter.next() - } +impl iter::Iterator for Union { + type Item = VmResult; - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} + fn next(&mut self) -> Option { + // SAFETY: we're holding onto the ref guards for both collections during + // iteration, so this is valid for the lifetime of the iterator. + unsafe { + if let Some(bucket) = self.this_iter.next() { + let (value, ()) = bucket.as_ref(); + return Some(VmResult::Ok(value.clone())); + } -#[rune::function(path = HashSet::from)] -fn from(value: Value) -> VmResult { - HashSet::from_iter(vm_try!(value.into_iter())) -} + let mut caller = EnvProtocolCaller; + + for bucket in self.other_iter.by_ref() { + let (key, ()) = bucket.as_ref(); -#[rune::function(instance)] -fn clone(this: &HashSet) -> HashSet { - this.clone() + match self.this.as_ref().get(key, &mut caller) { + VmResult::Ok(None) => return Some(VmResult::Ok(key.clone())), + VmResult::Ok(..) => {} + VmResult::Err(e) => return Some(VmResult::Err(e)), + } + } + + None + } + } } diff --git a/crates/rune/src/modules/collections/vec_deque.rs b/crates/rune/src/modules/collections/vec_deque.rs index 8a85797f3..1ddb01fa5 100644 --- a/crates/rune/src/modules/collections/vec_deque.rs +++ b/crates/rune/src/modules/collections/vec_deque.rs @@ -718,7 +718,7 @@ impl VecDeque { /// /// let buf = VecDeque::from([1, 2, 3]); /// ``` -#[rune::function(path = VecDeque::from)] +#[rune::function(free, path = VecDeque::from)] fn from(value: Value) -> VmResult { VecDeque::from_iter(vm_try!(value.into_iter())) } diff --git a/crates/rune/src/modules/f64.rs b/crates/rune/src/modules/f64.rs index 6500a92da..f2de1981f 100644 --- a/crates/rune/src/modules/f64.rs +++ b/crates/rune/src/modules/f64.rs @@ -293,10 +293,7 @@ fn partial_eq(this: f64, rhs: f64) -> bool { #[inline] fn eq(this: f64, rhs: f64) -> VmResult { let Some(ordering) = this.partial_cmp(&rhs) else { - return VmResult::err(VmErrorKind::IllegalFloatComparison { - lhs: this, - rhs, - }) + return VmResult::err(VmErrorKind::IllegalFloatComparison { lhs: this, rhs }); }; VmResult::Ok(matches!(ordering, Ordering::Equal)) @@ -337,10 +334,7 @@ fn partial_cmp(this: f64, rhs: f64) -> Option { #[inline] fn cmp(this: f64, rhs: f64) -> VmResult { let Some(ordering) = this.partial_cmp(&rhs) else { - return VmResult::err(VmErrorKind::IllegalFloatComparison { - lhs: this, - rhs, - }) + return VmResult::err(VmErrorKind::IllegalFloatComparison { lhs: this, rhs }); }; VmResult::Ok(ordering) diff --git a/crates/rune/src/modules/hash.rs b/crates/rune/src/modules/hash.rs new file mode 100644 index 000000000..127ad55e6 --- /dev/null +++ b/crates/rune/src/modules/hash.rs @@ -0,0 +1,16 @@ +//! The `std::hash` module. + +use crate as rune; +#[cfg(feature = "std")] +use crate::runtime::Hasher; +use crate::{ContextError, Module}; + +#[rune::module(::std::hash)] +/// Types for dealing with hashing in Rune. 
+pub fn module() -> Result { + #[allow(unused_mut)] + let mut module = Module::from_meta(self::module_meta); + #[cfg(feature = "std")] + module.ty::()?; + Ok(module) +} diff --git a/crates/rune/src/modules/iter.rs b/crates/rune/src/modules/iter.rs index 8ed18dc4c..d6bea9b5e 100644 --- a/crates/rune/src/modules/iter.rs +++ b/crates/rune/src/modules/iter.rs @@ -3,7 +3,11 @@ use crate::no_std::prelude::*; use crate as rune; -use crate::modules::collections::{HashMap, HashSet, VecDeque}; +use crate::modules::collections::VecDeque; +#[cfg(feature = "std")] +use crate::modules::collections::{HashMap, HashSet}; +#[cfg(feature = "std")] +use crate::runtime::EnvProtocolCaller; use crate::runtime::{ FromValue, Function, Iterator, Object, OwnedTuple, Protocol, Value, Vec, VmResult, }; @@ -48,7 +52,9 @@ pub fn module() -> Result { module.function_meta(collect_vec)?; module.function_meta(collect_vec_deque)?; + #[cfg(feature = "std")] module.function_meta(collect_hash_set)?; + #[cfg(feature = "std")] module.function_meta(collect_hash_map)?; module.function_meta(collect_tuple)?; module.function_meta(collect_object)?; @@ -1069,8 +1075,10 @@ fn collect_vec_deque(it: Iterator) -> VmResult { /// assert_eq!((0..3).iter().collect::(), HashSet::from([0, 1, 2])); /// ``` #[rune::function(instance, path = collect::)] +#[cfg(feature = "std")] fn collect_hash_set(it: Iterator) -> VmResult { - HashSet::from_iter(it) + let mut caller = EnvProtocolCaller; + HashSet::from_iter(it, &mut caller) } /// Collect the iterator as a [`HashMap`]. @@ -1085,8 +1093,10 @@ fn collect_hash_set(it: Iterator) -> VmResult { /// assert_eq!(actual, expected); /// ``` #[rune::function(instance, path = collect::)] +#[cfg(feature = "std")] fn collect_hash_map(it: Iterator) -> VmResult { - HashMap::from_iter(it) + let mut caller = EnvProtocolCaller; + HashMap::from_iter(it, &mut caller) } /// Collect the iterator as a [`Tuple`]. diff --git a/crates/rune/src/modules/ops.rs b/crates/rune/src/modules/ops.rs index a5628cd58..81805b10a 100644 --- a/crates/rune/src/modules/ops.rs +++ b/crates/rune/src/modules/ops.rs @@ -3,11 +3,15 @@ use core::cmp::Ordering; use crate as rune; +#[cfg(feature = "std")] +use crate::runtime::Hasher; use crate::runtime::{ ControlFlow, EnvProtocolCaller, Function, Generator, GeneratorState, Iterator, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, Value, Vm, VmResult, }; use crate::{ContextError, Module}; +#[cfg(feature = "std")] +use std::collections::hash_map::RandomState; #[rune::module(::std::ops)] /// Overloadable operators. @@ -77,26 +81,7 @@ pub fn module() -> Result { m.ty::()?; { - m.ty::>()?.docs([ - "The return value of a function producing a generator.", - "", - "Functions which contain the `yield` keyword produces generators.", - "", - "# Examples", - "", - "```rune", - "use std::ops::Generator;", - "", - "fn generate() {", - " yield 1;", - " yield 2;", - "}", - "", - "let g = generate();", - "assert!(g is Generator)", - "```", - ]); - + m.ty::>()?; m.function_meta(generator_next)?; m.function_meta(generator_resume)?; m.function_meta(generator_iter)?; @@ -115,6 +100,8 @@ pub fn module() -> Result { m.function_meta(eq)?; m.function_meta(partial_cmp)?; m.function_meta(cmp)?; + #[cfg(feature = "std")] + m.function_meta(hash)?; Ok(m) } @@ -220,6 +207,47 @@ fn cmp(lhs: Value, rhs: Value) -> VmResult { Value::cmp(&lhs, &rhs) } +#[cfg(feature = "std")] +lazy_static::lazy_static! { + static ref STATE: RandomState = RandomState::new(); +} + +/// Hashes the given value. 
+/// +/// For non-builtin types this uses the [`HASH`] protocol. +/// +/// # Hash stability +/// +/// The hash is guaranteed to be stable within a single virtual machine +/// invocation, but not across virtual machines. So returning the hash from one +/// and calculating it in another using an identical value is not guaranteed to +/// produce the same hash. +/// +/// # Panics +/// +/// Panics if we try to generate a hash from an unhashable value. +/// +/// # Examples +/// +/// ```rune +/// use std::ops::hash; +/// +/// assert_eq!(hash([1, 2]), hash((1, 2))); +/// ``` +#[rune::function] +#[cfg(feature = "std")] +fn hash(value: Value) -> VmResult { + let mut hasher = Hasher::new_with(&*STATE); + + vm_try!(Value::hash_with( + &value, + &mut hasher, + &mut EnvProtocolCaller + )); + + VmResult::Ok(hasher.finish() as i64) +} + /// Advance a generator producing the next value yielded. /// /// Unlike [`Generator::resume`], this can only consume the yielded values. diff --git a/crates/rune/src/modules/string.rs b/crates/rune/src/modules/string.rs index f577acaa6..7cbf0537b 100644 --- a/crates/rune/src/modules/string.rs +++ b/crates/rune/src/modules/string.rs @@ -127,7 +127,7 @@ impl NotCharBoundary { /// [`Vec`]: crate::vec::Vec "Vec" /// [`&str`]: prim@str "&str" /// [`into_bytes`]: String::into_bytes -#[rune::function(path = String::from_utf8)] +#[rune::function(free, path = String::from_utf8)] fn from_utf8(bytes: &[u8]) -> Result { String::from_utf8(bytes.to_vec()) } @@ -163,12 +163,12 @@ fn as_bytes(s: &str) -> Bytes { /// let s = String::from("hello"); /// assert_eq!(s, "hello"); /// ``` -#[rune::function(path = String::from)] +#[rune::function(free, path = String::from)] fn string_from(value: &str) -> String { String::from(value) } -#[rune::function(path = String::from_str)] +#[rune::function(free, path = String::from_str)] fn string_from_str(value: &str) -> String { String::from(value) } @@ -190,7 +190,7 @@ fn string_from_str(value: &str) -> String { /// ``` /// let s = String::new(); /// ``` -#[rune::function(path = String::new)] +#[rune::function(free, path = String::new)] fn string_new() -> String { String::new() } @@ -233,7 +233,7 @@ fn string_new() -> String { /// // ...but this may make the string reallocate /// s.push('a'); /// ``` -#[rune::function(path = String::with_capacity)] +#[rune::function(free, path = String::with_capacity)] fn string_with_capacity(capacity: usize) -> String { String::with_capacity(capacity) } diff --git a/crates/rune/src/modules/tuple.rs b/crates/rune/src/modules/tuple.rs index b56879cf4..e6d79c79d 100644 --- a/crates/rune/src/modules/tuple.rs +++ b/crates/rune/src/modules/tuple.rs @@ -3,6 +3,8 @@ use core::cmp::Ordering; use crate as rune; +#[cfg(feature = "std")] +use crate::runtime::Hasher; use crate::runtime::{EnvProtocolCaller, Iterator, Ref, Tuple, Value, Vec, VmResult}; use crate::{ContextError, Module}; @@ -20,6 +22,8 @@ pub fn module() -> Result { m.function_meta(eq)?; m.function_meta(partial_cmp)?; m.function_meta(cmp)?; + #[cfg(feature = "std")] + m.function_meta(hash)?; Ok(m) } @@ -174,3 +178,20 @@ fn partial_cmp(this: &Tuple, other: &Tuple) -> VmResult> { fn cmp(this: &Tuple, other: &Tuple) -> VmResult { Vec::cmp_with(this, other, &mut EnvProtocolCaller) } + +/// Calculate a hash for a tuple. +/// +/// # Examples +/// +/// ```rune +/// use std::ops::hash; +/// +/// assert_eq!(hash((0, 2, 3)), hash((0, 2, 3))); +/// // Note: this is not guaranteed to be true forever, but it's true right now. 
+/// assert_eq!(hash((0, 2, 3)), hash([0, 2, 3])); +/// ``` +#[rune::function(instance, protocol = HASH)] +#[cfg(feature = "std")] +fn hash(this: &Tuple, hasher: &mut Hasher) -> VmResult<()> { + Tuple::hash_with(this, hasher, &mut EnvProtocolCaller) +} diff --git a/crates/rune/src/modules/vec.rs b/crates/rune/src/modules/vec.rs index 47eee82f3..c4ba74526 100644 --- a/crates/rune/src/modules/vec.rs +++ b/crates/rune/src/modules/vec.rs @@ -4,6 +4,8 @@ use core::cmp::Ordering; use core::fmt; use crate as rune; +#[cfg(feature = "std")] +use crate::runtime::Hasher; use crate::runtime::{ EnvProtocolCaller, Formatter, Function, Iterator, Ref, TypeOf, Value, Vec, VmErrorKind, VmResult, @@ -54,6 +56,8 @@ pub fn module() -> Result { m.function_meta(eq)?; m.function_meta(partial_cmp)?; m.function_meta(cmp)?; + #[cfg(feature = "std")] + m.function_meta(hash)?; Ok(m) } @@ -66,7 +70,7 @@ pub fn module() -> Result { /// ```rune /// let vec = Vec::new(); /// ``` -#[rune::function(path = Vec::new)] +#[rune::function(free, path = Vec::new)] fn vec_new() -> Vec { Vec::new() } @@ -114,7 +118,7 @@ fn vec_new() -> Vec { /// assert_eq!(vec.len(), 11); /// assert!(vec.capacity() >= 11); /// ``` -#[rune::function(path = Vec::with_capacity)] +#[rune::function(free, path = Vec::with_capacity)] fn vec_with_capacity(capacity: usize) -> Vec { Vec::with_capacity(capacity) } @@ -185,7 +189,7 @@ fn capacity(vec: &Vec) -> usize { /// assert_eq!(None, v.get(3)); /// assert_eq!(None, v.get(0..4)); /// ``` -#[rune::function(instance, path = Vec::get)] +#[rune::function(instance)] fn get(this: &Vec, index: Value) -> VmResult> { Vec::index_get(this, index) } @@ -615,3 +619,18 @@ fn partial_cmp(this: &Vec, other: &Vec) -> VmResult> { fn cmp(this: &Vec, other: &Vec) -> VmResult { Vec::cmp_with(this, other, &mut EnvProtocolCaller) } + +/// Calculate the hash of a vector. +/// +/// # Examples +/// +/// ```rune +/// use std::ops::hash; +/// +/// assert_eq!(hash([0, 2, 3]), hash([0, 2, 3])); +/// ``` +#[rune::function(instance, protocol = HASH)] +#[cfg(feature = "std")] +fn hash(this: &Vec, hasher: &mut Hasher) -> VmResult<()> { + Vec::hash_with(this, hasher, &mut EnvProtocolCaller) +} diff --git a/crates/rune/src/query/query.rs b/crates/rune/src/query/query.rs index 315267ef1..a86914aee 100644 --- a/crates/rune/src/query/query.rs +++ b/crates/rune/src/query/query.rs @@ -1,9 +1,11 @@ use core::fmt; +#[cfg(feature = "emit")] use core::mem::take; use crate::no_std::borrow::Cow; use crate::no_std::collections::{hash_map, BTreeMap, HashMap, HashSet, VecDeque}; use crate::no_std::prelude::*; +use crate::no_std::rc::Rc; use crate::no_std::sync::Arc; use crate::ast::{Span, Spanned}; @@ -62,7 +64,7 @@ pub(crate) struct QueryInner<'arena> { /// be compiled. indexed: BTreeMap>, /// Compiled constant functions. - const_fns: HashMap>>, + const_fns: HashMap>>, /// Indexed constant values. constants: HashMap, /// Query paths. 
@@ -283,6 +285,7 @@ impl<'a, 'arena> Query<'a, 'arena> { Err(Box::new(ErrorKind::AmbiguousContextItem { item: self.pool.item(item).to_owned(), + #[cfg(feature = "emit")] infos: metas.map(|i| i.info()).collect(), })) } @@ -319,7 +322,10 @@ impl<'a, 'arena> Query<'a, 'arena> { }; let Some(item) = &meta.item else { - return Err(compile::Error::new(location.as_spanned(), ErrorKind::MissingItemHash { hash: meta.hash })); + return Err(compile::Error::new( + location.as_spanned(), + ErrorKind::MissingItemHash { hash: meta.hash }, + )); }; let meta = meta::Meta { @@ -410,6 +416,7 @@ impl<'a, 'arena> Query<'a, 'arena> { let item = self.insert_new_item(items, location, parent, visibility, docs)?; let query_mod = self.pool.alloc_module(ModMeta { + #[cfg(feature = "emit")] location: location.location(), item: item.item, visibility, @@ -433,6 +440,7 @@ impl<'a, 'arena> Query<'a, 'arena> { let location = Location::new(source_id, spanned); let module = self.pool.alloc_module(ModMeta { + #[cfg(feature = "emit")] location, item: ItemId::default(), visibility: Visibility::Public, @@ -587,7 +595,7 @@ impl<'a, 'arena> Query<'a, 'arena> { } /// Get the constant function associated with the opaque. - pub(crate) fn const_fn_for(&self, ast: T) -> compile::Result>, MissingId> + pub(crate) fn const_fn_for(&self, ast: T) -> compile::Result>, MissingId> where T: Opaque, { @@ -855,8 +863,16 @@ impl<'a, 'arena> Query<'a, 'arena> { return Err(compile::Error::msg(path, "Tried to use non-indexed path")); }; - let Some(&QueryPath { module, item, impl_item }) = self.inner.query_paths.get(&id) else { - return Err(compile::Error::msg(path, format_args!("Missing query path for id {}", id))); + let Some(&QueryPath { + module, + item, + impl_item, + }) = self.inner.query_paths.get(&id) + else { + return Err(compile::Error::msg( + path, + format_args!("Missing query path for id {}", id), + )); }; let mut in_self_type = false; @@ -871,7 +887,10 @@ impl<'a, 'arena> Query<'a, 'arena> { (None, segment) => match segment { ast::PathSegment::Ident(ident) => self.convert_initial_path(module, item, ident)?, ast::PathSegment::Super(..) => { - let Some(segment) = self.pool.try_map_alloc(self.pool.module(module).item, Item::parent) else { + let Some(segment) = self + .pool + .try_map_alloc(self.pool.module(module).item, Item::parent) + else { return Err(compile::Error::new(segment, ErrorKind::UnsupportedSuper)); }; @@ -879,7 +898,10 @@ impl<'a, 'arena> Query<'a, 'arena> { } ast::PathSegment::SelfType(..) 
=> { let Some(impl_item) = impl_item else { - return Err(compile::Error::new(segment.span(), ErrorKind::UnsupportedSelfType)); + return Err(compile::Error::new( + segment.span(), + ErrorKind::UnsupportedSelfType, + )); }; in_self_type = true; @@ -922,10 +944,7 @@ impl<'a, 'arena> Query<'a, 'arena> { } ast::PathSegment::Generics(arguments) => { let Some(p) = parameters_it.next() else { - return Err(compile::Error::new( - segment, - ErrorKind::UnsupportedGenerics, - )); + return Err(compile::Error::new(segment, ErrorKind::UnsupportedGenerics)); }; trailing += 1; @@ -954,13 +973,11 @@ impl<'a, 'arena> Query<'a, 'arena> { item.push(ident.resolve(resolve_context!(self))?); let Some(p) = parameters_it.next() else { - return Err(compile::Error::new( - segment, - ErrorKind::UnsupportedGenerics, - )); + return Err(compile::Error::new(segment, ErrorKind::UnsupportedGenerics)); }; - let Some(ast::PathSegment::Generics(arguments)) = it.clone().next().map(|(_, p)| p) else { + let Some(ast::PathSegment::Generics(arguments)) = it.clone().next().map(|(_, p)| p) + else { continue; }; @@ -1004,8 +1021,15 @@ impl<'a, 'arena> Query<'a, 'arena> { None => None, }; - let Some(last) = alias.as_ref().map(IntoComponent::as_component_ref).or_else(|| target.last()) else { - return Err(compile::Error::new(location.as_spanned(), ErrorKind::LastUseComponent)); + let Some(last) = alias + .as_ref() + .map(IntoComponent::as_component_ref) + .or_else(|| target.last()) + else { + return Err(compile::Error::new( + location.as_spanned(), + ErrorKind::LastUseComponent, + )); }; let item = self.pool.alloc_item(at.extended(last)); @@ -1087,7 +1111,14 @@ impl<'a, 'arena> Query<'a, 'arena> { cur.push(c); let cur = self.pool.alloc_item(&cur); - let update = self.import_step(span, module, cur, used, &mut path)?; + let update = self.import_step( + span, + module, + cur, + used, + #[cfg(feature = "emit")] + &mut path, + )?; let update = match update { Some(update) => update, @@ -1100,7 +1131,13 @@ impl<'a, 'arena> Query<'a, 'arena> { }); if !visited.insert(self.pool.alloc_item(&item)) { - return Err(compile::Error::new(span, ErrorKind::ImportCycle { path })); + return Err(compile::Error::new( + span, + ErrorKind::ImportCycle { + #[cfg(feature = "emit")] + path, + }, + )); } module = update.module; @@ -1127,7 +1164,7 @@ impl<'a, 'arena> Query<'a, 'arena> { module: ModId, item: ItemId, used: Used, - path: &mut Vec, + #[cfg(feature = "emit")] path: &mut Vec, ) -> compile::Result> { // already resolved query. if let Some(meta) = self.inner.meta.get(&(item, Hash::EMPTY)) { @@ -1148,8 +1185,10 @@ impl<'a, 'arena> Query<'a, 'arena> { module, item, entry.item_meta.module, + #[cfg(feature = "emit")] entry.item_meta.location, entry.item_meta.visibility, + #[cfg(feature = "emit")] path, )?; @@ -1213,7 +1252,10 @@ impl<'a, 'arena> Query<'a, 'arena> { // Ensure that the enum is being built and marked as used. let Some(enum_meta) = self.query_meta(span, enum_.item, Default::default())? 
else { - return Err(compile::Error::msg(span, format_args!("Missing enum by {:?}", variant.enum_id))); + return Err(compile::Error::msg( + span, + format_args!("Missing enum by {:?}", variant.enum_id), + )); }; meta::Kind::Variant { @@ -1415,7 +1457,7 @@ impl<'a, 'arena> Query<'a, 'arena> { self.inner.const_fns.insert( id, - Arc::new(ConstFn { + Rc::new(ConstFn { item_meta, ir_fn, hir, @@ -1536,6 +1578,7 @@ impl<'a, 'arena> Query<'a, 'arena> { span, ErrorKind::AmbiguousItem { item: self.pool.item(cur.item_meta.item).to_owned(), + #[cfg(feature = "emit")] locations: locations .into_iter() .map(|(loc, item)| (loc, self.pool.item(item).to_owned())) @@ -1549,6 +1592,7 @@ impl<'a, 'arena> Query<'a, 'arena> { span, ErrorKind::AmbiguousItem { item: self.pool.item(cur.item_meta.item).to_owned(), + #[cfg(feature = "emit")] locations: locations .into_iter() .map(|(loc, item)| (loc, self.pool.item(item).to_owned())) @@ -1617,10 +1661,11 @@ impl<'a, 'arena> Query<'a, 'arena> { from: ModId, item: ItemId, module: ModId, - location: Location, + #[cfg(feature = "emit")] location: Location, visibility: Visibility, - chain: &mut Vec, + #[cfg(feature = "emit")] chain: &mut Vec, ) -> compile::Result<()> { + #[cfg(feature = "emit")] fn into_chain(chain: Vec) -> Vec { chain.into_iter().map(|c| c.location).collect() } @@ -1649,7 +1694,9 @@ impl<'a, 'arena> Query<'a, 'arena> { return Err(compile::Error::new( span, ErrorKind::NotVisibleMod { + #[cfg(feature = "emit")] chain: into_chain(take(chain)), + #[cfg(feature = "emit")] location: m.location, visibility: m.visibility, item: current_module, @@ -1663,7 +1710,9 @@ impl<'a, 'arena> Query<'a, 'arena> { return Err(compile::Error::new( span, ErrorKind::NotVisible { + #[cfg(feature = "emit")] chain: into_chain(take(chain)), + #[cfg(feature = "emit")] location, visibility, item: self.pool.item(item).to_owned(), diff --git a/crates/rune/src/runtime.rs b/crates/rune/src/runtime.rs index f44601427..ac18f9091 100644 --- a/crates/rune/src/runtime.rs +++ b/crates/rune/src/runtime.rs @@ -167,8 +167,10 @@ mod vm_call; pub(crate) use self::vm_call::VmCall; mod vm_error; +#[cfg(feature = "emit")] +pub(crate) use self::vm_error::VmErrorAt; +pub(crate) use self::vm_error::VmErrorKind; pub use self::vm_error::{try_result, TryFromResult, VmError, VmIntegerRepr, VmResult}; -pub(crate) use self::vm_error::{VmErrorAt, VmErrorKind}; mod vm_execution; pub use self::vm_execution::{ExecutionState, VmExecution, VmSendExecution}; @@ -182,3 +184,8 @@ pub use self::fmt::Formatter; mod control_flow; pub use self::control_flow::ControlFlow; + +#[cfg(feature = "std")] +mod hasher; +#[cfg(feature = "std")] +pub use self::hasher::Hasher; diff --git a/crates/rune/src/runtime/bytes.rs b/crates/rune/src/runtime/bytes.rs index 15a2147a1..56dc22932 100644 --- a/crates/rune/src/runtime/bytes.rs +++ b/crates/rune/src/runtime/bytes.rs @@ -10,13 +10,14 @@ use crate::no_std::prelude::*; use serde::{Deserialize, Serialize}; -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{RawRef, RawStr, Ref, UnsafeToRef, Value, VmResult}; +use crate as rune; +use crate::runtime::{RawRef, Ref, UnsafeToRef, Value, VmResult}; +use crate::Any; /// A vector of bytes. 
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] +#[derive(Any, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] #[serde(transparent)] +#[rune(builtin, static_type = BYTES_TYPE)] pub struct Bytes { #[serde(with = "serde_bytes")] pub(crate) bytes: Vec, @@ -300,16 +301,10 @@ impl UnsafeToRef for [u8] { let (value, guard) = Ref::into_raw(bytes); // Safety: we're holding onto the guard for the slice here, so it is // live. - VmResult::Ok(((*value).as_slice(), guard)) + VmResult::Ok((value.as_ref().as_slice(), guard)) } } -impl Named for Bytes { - const BASE_NAME: RawStr = RawStr::from_str("Bytes"); -} - -impl InstallWith for Bytes {} - impl cmp::PartialEq<[u8; N]> for Bytes { #[inline] fn eq(&self, other: &[u8; N]) -> bool { diff --git a/crates/rune/src/runtime/control_flow.rs b/crates/rune/src/runtime/control_flow.rs index 51b7f698c..65f5e3a05 100644 --- a/crates/rune/src/runtime/control_flow.rs +++ b/crates/rune/src/runtime/control_flow.rs @@ -21,7 +21,7 @@ use crate::Any; /// assert_eq!(c, ControlFlow::Continue(42)); /// ``` #[derive(Debug, Clone, Any)] -#[rune(builtin)] +#[rune(builtin, static_type = CONTROL_FLOW_TYPE)] pub enum ControlFlow { /// Move on to the next phase of the operation as normal. #[rune(constructor)] diff --git a/crates/rune/src/runtime/format.rs b/crates/rune/src/runtime/format.rs index 0e2298978..e341c4396 100644 --- a/crates/rune/src/runtime/format.rs +++ b/crates/rune/src/runtime/format.rs @@ -12,9 +12,9 @@ use crate::no_std::prelude::*; use musli::{Decode, Encode}; use serde::{Deserialize, Serialize}; -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{Formatter, FromValue, ProtocolCaller, RawStr, Value, VmErrorKind, VmResult}; +use crate as rune; +use crate::runtime::{Formatter, FromValue, ProtocolCaller, Value, VmErrorKind, VmResult}; +use crate::Any; /// Error raised when trying to parse a type string and it fails. #[derive(Debug, Clone, Copy)] @@ -40,7 +40,8 @@ impl fmt::Display for AlignmentFromStrError { } /// A format specification, wrapping an inner value. -#[derive(Debug, Clone)] +#[derive(Any, Debug, Clone)] +#[rune(builtin, static_type = FORMAT_TYPE)] pub struct Format { /// The value being formatted. 
pub(crate) value: Value, @@ -48,12 +49,6 @@ pub struct Format { pub(crate) spec: FormatSpec, } -impl Named for Format { - const BASE_NAME: RawStr = RawStr::from_str("Format"); -} - -impl InstallWith for Format {} - impl FromValue for Format { #[inline] fn from_value(value: Value) -> VmResult { diff --git a/crates/rune/src/runtime/from_value.rs b/crates/rune/src/runtime/from_value.rs index fa6b045b5..70640c82b 100644 --- a/crates/rune/src/runtime/from_value.rs +++ b/crates/rune/src/runtime/from_value.rs @@ -242,7 +242,7 @@ impl UnsafeToRef for Option { let option = vm_try!(value.into_option()); let option = vm_try!(option.into_ref()); let (value, guard) = Ref::into_raw(option); - VmResult::Ok((&*value, guard)) + VmResult::Ok((value.as_ref(), guard)) } } @@ -252,8 +252,8 @@ impl UnsafeToMut for Option { unsafe fn unsafe_to_mut<'a>(value: Value) -> VmResult<(&'a mut Self, Self::Guard)> { let option = vm_try!(value.into_option()); let option = vm_try!(option.into_mut()); - let (value, guard) = Mut::into_raw(option); - VmResult::Ok((&mut *value, guard)) + let (mut value, guard) = Mut::into_raw(option); + VmResult::Ok((value.as_mut(), guard)) } } @@ -312,7 +312,7 @@ impl UnsafeToRef for str { Value::String(string) => { let string = vm_try!(string.into_ref()); let (string, guard) = Ref::into_raw(string); - VmResult::Ok((&*string, guard)) + VmResult::Ok((string.as_ref(), guard)) } actual => VmResult::err(VmErrorKind::expected::(vm_try!(actual.type_info()))), } @@ -326,8 +326,8 @@ impl UnsafeToMut for str { match value { Value::String(string) => { let string = vm_try!(string.into_mut()); - let (string, guard) = Mut::into_raw(string); - VmResult::Ok(((*string).as_mut_str(), guard)) + let (mut string, guard) = Mut::into_raw(string); + VmResult::Ok((string.as_mut().as_mut_str(), guard)) } actual => VmResult::err(VmErrorKind::expected::(vm_try!(actual.type_info()))), } @@ -342,7 +342,7 @@ impl UnsafeToRef for String { Value::String(string) => { let string = vm_try!(string.into_ref()); let (string, guard) = Ref::into_raw(string); - VmResult::Ok((&*string, guard)) + VmResult::Ok((string.as_ref(), guard)) } actual => VmResult::err(VmErrorKind::expected::(vm_try!(actual.type_info()))), } @@ -356,8 +356,8 @@ impl UnsafeToMut for String { match value { Value::String(string) => { let string = vm_try!(string.into_mut()); - let (string, guard) = Mut::into_raw(string); - VmResult::Ok((&mut *string, guard)) + let (mut string, guard) = Mut::into_raw(string); + VmResult::Ok((string.as_mut(), guard)) } actual => VmResult::err(VmErrorKind::expected::(vm_try!(actual.type_info()))), } @@ -384,7 +384,7 @@ impl UnsafeToRef for Result { let result = vm_try!(value.into_result()); let result = vm_try!(result.into_ref()); let (value, guard) = Ref::into_raw(result); - VmResult::Ok((&*value, guard)) + VmResult::Ok((value.as_ref(), guard)) } } @@ -394,8 +394,8 @@ impl UnsafeToMut for Result { unsafe fn unsafe_to_mut<'a>(value: Value) -> VmResult<(&'a mut Self, Self::Guard)> { let result = vm_try!(value.into_result()); let result = vm_try!(result.into_mut()); - let (value, guard) = Mut::into_raw(result); - VmResult::Ok((&mut *value, guard)) + let (mut value, guard) = Mut::into_raw(result); + VmResult::Ok((value.as_mut(), guard)) } } diff --git a/crates/rune/src/runtime/function.rs b/crates/rune/src/runtime/function.rs index a7a21e8cc..637ee024c 100644 --- a/crates/rune/src/runtime/function.rs +++ b/crates/rune/src/runtime/function.rs @@ -41,7 +41,7 @@ use crate::Hash; /// ``` #[derive(Any)] #[repr(transparent)] -#[rune(builtin)] 
+#[rune(builtin, static_type = FUNCTION_TYPE)] pub struct Function(FunctionImpl); impl Function { diff --git a/crates/rune/src/runtime/future.rs b/crates/rune/src/runtime/future.rs index f781bbd93..c511d267b 100644 --- a/crates/rune/src/runtime/future.rs +++ b/crates/rune/src/runtime/future.rs @@ -5,9 +5,9 @@ use core::task::{Context, Poll}; use crate::no_std::prelude::*; -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{RawStr, ToValue, Value, VmErrorKind, VmResult}; +use crate as rune; +use crate::runtime::{ToValue, Value, VmErrorKind, VmResult}; +use crate::Any; use pin_project::pin_project; @@ -16,6 +16,8 @@ type DynFuture = dyn future::Future> + 'static; /// A type-erased future that can only be unsafely polled in combination with /// the virtual machine that created it. +#[derive(Any)] +#[rune(builtin, static_type = FUTURE_TYPE, from_value = Value::into_future)] pub struct Future { future: Option>>, } @@ -109,11 +111,3 @@ where } } } - -from_value!(Future, into_future); - -impl Named for Future { - const BASE_NAME: RawStr = RawStr::from_str("Future"); -} - -impl InstallWith for Future {} diff --git a/crates/rune/src/runtime/generator.rs b/crates/rune/src/runtime/generator.rs index 415b9532f..b494c03cc 100644 --- a/crates/rune/src/runtime/generator.rs +++ b/crates/rune/src/runtime/generator.rs @@ -1,13 +1,29 @@ use core::fmt; use core::iter; -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{ - GeneratorState, Iterator, RawStr, Value, Vm, VmErrorKind, VmExecution, VmResult, -}; - -/// A generator with a stored virtual machine. +use crate as rune; +use crate::runtime::{GeneratorState, Iterator, Value, Vm, VmErrorKind, VmExecution, VmResult}; +use crate::Any; + +/// The return value of a function producing a generator. +/// +/// Functions which contain the `yield` keyword produces generators. +/// +/// # Examples +/// +/// ```rune +/// use std::ops::Generator; +/// +/// fn generate() { +/// yield 1; +/// yield 2; +/// } +/// +/// let g = generate(); +/// assert!(g is Generator) +/// ``` +#[derive(Any)] +#[rune(builtin, static_type = GENERATOR_TYPE, from_value = Value::into_generator, from_value_params = [Vm])] pub struct Generator where T: AsRef + AsMut, @@ -116,14 +132,3 @@ where .finish() } } - -impl Named for Generator -where - T: AsRef + AsMut, -{ - const BASE_NAME: RawStr = RawStr::from_str("Generator"); -} - -impl InstallWith for Generator where T: AsRef + AsMut {} - -from_value!(Generator, into_generator); diff --git a/crates/rune/src/runtime/generator_state.rs b/crates/rune/src/runtime/generator_state.rs index ca49906b7..ccaf28cb7 100644 --- a/crates/rune/src/runtime/generator_state.rs +++ b/crates/rune/src/runtime/generator_state.rs @@ -1,6 +1,6 @@ -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{ProtocolCaller, RawStr, Value, VmResult}; +use crate as rune; +use crate::runtime::{ProtocolCaller, Value, VmResult}; +use crate::Any; /// The state of a generator. /// @@ -48,7 +48,8 @@ use crate::runtime::{ProtocolCaller, RawStr, Value, VmResult}; /// assert_eq!(ret, 42); /// # Ok::<_, rune::Error>(()) /// ``` -#[derive(Debug)] +#[derive(Any, Debug)] +#[rune(builtin, static_type = GENERATOR_STATE_TYPE)] pub enum GeneratorState { /// The generator yielded. 
Yielded(Value), @@ -97,9 +98,3 @@ impl GeneratorState { } from_value!(GeneratorState, into_generator_state); - -impl Named for GeneratorState { - const BASE_NAME: RawStr = RawStr::from_str("GeneratorState"); -} - -impl InstallWith for GeneratorState {} diff --git a/crates/rune/src/runtime/hasher.rs b/crates/rune/src/runtime/hasher.rs new file mode 100644 index 000000000..f90c4478f --- /dev/null +++ b/crates/rune/src/runtime/hasher.rs @@ -0,0 +1,60 @@ +use core::hash::{BuildHasher, Hasher as _}; + +use crate::no_std::collections::hash_map::DefaultHasher; + +use crate as rune; +use crate::Any; + +/// The default hasher used in Rune. +#[derive(Any)] +#[rune(item = ::std::hash)] +pub struct Hasher { + hasher: DefaultHasher, +} + +impl Hasher { + /// Construct a new empty hasher. + pub(crate) fn new_with(build_hasher: &S) -> Self + where + S: BuildHasher, + { + Self { + hasher: build_hasher.build_hasher(), + } + } + + /// Hash some bytes. + pub(crate) fn write(&mut self, bytes: &[u8]) { + self.hasher.write(bytes); + } + + /// Hash a string. + pub(crate) fn write_str(&mut self, string: &str) { + self.hasher.write(string.as_bytes()); + } + + /// Hash an 64-bit float. + /// + /// You should ensure that the float is normal per the [`f64::is_normal`] + /// function before hashing it, since otherwise equality tests against the + /// float won't work as intended. Otherwise, know what you're doing. + pub(crate) fn write_f64(&mut self, value: f64) { + let bits = value.to_bits(); + self.hasher.write_u64(bits); + } + + /// Hash a 64-bit signed integer. + pub(crate) fn write_i64(&mut self, value: i64) { + self.hasher.write_i64(value); + } + + /// Hash an 8-bit unsigned integer. + pub(crate) fn write_u8(&mut self, value: u8) { + self.hasher.write_u8(value); + } + + /// Construct a hash. + pub fn finish(self) -> u64 { + self.hasher.finish() + } +} diff --git a/crates/rune/src/runtime/iterator.rs b/crates/rune/src/runtime/iterator.rs index f44df95a7..ca56f486c 100644 --- a/crates/rune/src/runtime/iterator.rs +++ b/crates/rune/src/runtime/iterator.rs @@ -5,9 +5,9 @@ use core::iter; use crate::no_std::prelude::*; use crate::no_std::vec; -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{FromValue, Function, Panic, RawStr, ToValue, Value, VmErrorKind, VmResult}; +use crate as rune; +use crate::runtime::{FromValue, Function, Panic, ToValue, Value, VmErrorKind, VmResult}; +use crate::Any; // Note: A fair amount of code in this module is duplicated from the Rust // project under the MIT license. @@ -72,6 +72,8 @@ macro_rules! maybe { } /// An owning iterator. +#[derive(Any)] +#[rune(builtin, static_type = ITERATOR_TYPE, from_value = Value::into_iterator)] pub struct Iterator { iter: IterRepr, } @@ -222,16 +224,6 @@ impl Iterator { }) } - #[inline] - pub(crate) fn chain_raw(self, other: Self) -> VmResult { - VmResult::Ok(Self { - iter: IterRepr::Chain(Box::new(Chain { - a: Some(self.iter), - b: Some(other.iter), - })), - }) - } - #[inline] pub(crate) fn rev(self) -> VmResult { if !self.iter.is_double_ended() { @@ -384,14 +376,6 @@ impl fmt::Debug for Iterator { } } -impl Named for Iterator { - const BASE_NAME: RawStr = RawStr::from_str("Iterator"); -} - -impl InstallWith for Iterator {} - -from_value!(Iterator, into_iterator); - /// The inner representation of an [Iterator]. It handles all the necessary /// dynamic dispatch to support dynamic iterators. 
enum IterRepr { diff --git a/crates/rune/src/runtime/object.rs b/crates/rune/src/runtime/object.rs index 8918b3214..693c22f5e 100644 --- a/crates/rune/src/runtime/object.rs +++ b/crates/rune/src/runtime/object.rs @@ -9,9 +9,9 @@ use crate::no_std::collections::{btree_map, BTreeMap}; use crate::no_std::prelude::*; use crate as rune; -use crate::compile::{ItemBuf, Named}; -use crate::module::InstallWith; -use crate::runtime::{FromValue, Iterator, ProtocolCaller, RawStr, Ref, ToValue, Value, VmResult}; +use crate::compile::ItemBuf; +use crate::runtime::{FromValue, Iterator, ProtocolCaller, Ref, ToValue, Value, VmResult}; +use crate::Any; /// An owning iterator over the entries of a `Object`. /// @@ -60,9 +60,9 @@ pub type Values<'a> = btree_map::Values<'a, String, Value>; /// Struct representing a dynamic anonymous object. /// -/// # Examples +/// # Rust Examples /// -/// ``` +/// ```rust /// let mut object = rune::runtime::Object::new(); /// assert!(object.is_empty()); /// @@ -75,8 +75,9 @@ pub type Values<'a> = btree_map::Values<'a, String, Value>; /// assert_eq!(None::, object.get_value("baz").into_result()?); /// # Ok::<_, rune::Error>(()) /// ``` -#[derive(Default, Clone)] +#[derive(Any, Default, Clone)] #[repr(transparent)] +#[rune(builtin, static_type = OBJECT_TYPE)] pub struct Object { inner: BTreeMap, } @@ -463,12 +464,6 @@ impl iter::FromIterator<(String, Value)> for Object { from_value!(Object, into_object); -impl Named for Object { - const BASE_NAME: RawStr = RawStr::from_str("Object"); -} - -impl InstallWith for Object {} - pub struct DebugStruct<'a> { item: &'a ItemBuf, st: &'a Object, diff --git a/crates/rune/src/runtime/protocol_caller.rs b/crates/rune/src/runtime/protocol_caller.rs index 87a49f0c7..8fff54042 100644 --- a/crates/rune/src/runtime/protocol_caller.rs +++ b/crates/rune/src/runtime/protocol_caller.rs @@ -67,7 +67,10 @@ impl ProtocolCaller for EnvProtocolCaller { } let Some(handler) = context.function(hash) else { - return VmResult::err(VmErrorKind::MissingInstanceFunction { hash, instance: vm_try!(target.type_info()) }); + return VmResult::err(VmErrorKind::MissingInstanceFunction { + hash, + instance: vm_try!(target.type_info()), + }); }; let mut stack = Stack::with_capacity(count); diff --git a/crates/rune/src/runtime/range.rs b/crates/rune/src/runtime/range.rs index 285be267b..7ceb81331 100644 --- a/crates/rune/src/runtime/range.rs +++ b/crates/rune/src/runtime/range.rs @@ -52,7 +52,7 @@ use crate::Any; /// # Ok::<_, rune::Error>(()) /// ``` #[derive(Any, Clone)] -#[rune(builtin, constructor)] +#[rune(builtin, constructor, from_value = Value::into_range, static_type = RANGE_TYPE)] pub struct Range { /// The start value of the range. #[rune(get, set)] @@ -317,5 +317,3 @@ where VmResult::Ok(ops::Range { start, end }) } } - -from_value!(Range, into_range); diff --git a/crates/rune/src/runtime/range_from.rs b/crates/rune/src/runtime/range_from.rs index 9686b1ebb..74b3a1f75 100644 --- a/crates/rune/src/runtime/range_from.rs +++ b/crates/rune/src/runtime/range_from.rs @@ -49,7 +49,7 @@ use crate::Any; /// # Ok::<_, rune::Error>(()) /// ``` #[derive(Any, Clone)] -#[rune(builtin, constructor)] +#[rune(builtin, constructor, from_value = Value::into_range_from, static_type = RANGE_FROM_TYPE)] pub struct RangeFrom { /// The start value of the range. 
#[rune(get, set)] @@ -285,5 +285,3 @@ where VmResult::Ok(ops::RangeFrom { start }) } } - -from_value!(RangeFrom, into_range_from); diff --git a/crates/rune/src/runtime/range_full.rs b/crates/rune/src/runtime/range_full.rs index 1abad671e..a1f6b6319 100644 --- a/crates/rune/src/runtime/range_full.rs +++ b/crates/rune/src/runtime/range_full.rs @@ -30,7 +30,7 @@ use crate::Any; /// # Ok::<_, rune::Error>(()) /// ``` #[derive(Any, Default, Clone)] -#[rune(builtin, constructor)] +#[rune(builtin, constructor, from_value = Value::into_range_full, static_type = RANGE_FULL_TYPE)] pub struct RangeFull; impl RangeFull { @@ -105,5 +105,3 @@ impl FromValue for ops::RangeFull { VmResult::Ok(ops::RangeFull) } } - -from_value!(RangeFull, into_range_full); diff --git a/crates/rune/src/runtime/range_inclusive.rs b/crates/rune/src/runtime/range_inclusive.rs index ea4187f96..b4da9f62f 100644 --- a/crates/rune/src/runtime/range_inclusive.rs +++ b/crates/rune/src/runtime/range_inclusive.rs @@ -53,7 +53,7 @@ use crate::Any; /// # Ok::<_, rune::Error>(()) /// ``` #[derive(Any, Clone)] -#[rune(builtin, constructor)] +#[rune(builtin, constructor, from_value = Value::into_range_inclusive, static_type = RANGE_INCLUSIVE_TYPE)] pub struct RangeInclusive { /// The start value of the range. #[rune(get, set)] @@ -318,5 +318,3 @@ where VmResult::Ok(start..=end) } } - -from_value!(RangeInclusive, into_range_inclusive); diff --git a/crates/rune/src/runtime/range_to.rs b/crates/rune/src/runtime/range_to.rs index a36100411..337774e5e 100644 --- a/crates/rune/src/runtime/range_to.rs +++ b/crates/rune/src/runtime/range_to.rs @@ -39,7 +39,7 @@ use crate::Any; /// # Ok::<_, rune::Error>(()) /// ``` #[derive(Any, Clone)] -#[rune(builtin, constructor)] +#[rune(builtin, constructor, from_value = Value::into_range_to, static_type = RANGE_TO_TYPE)] pub struct RangeTo { /// The end value of the range. #[rune(get, set)] @@ -206,5 +206,3 @@ where VmResult::Ok(ops::RangeTo { end }) } } - -from_value!(RangeTo, into_range_to); diff --git a/crates/rune/src/runtime/range_to_inclusive.rs b/crates/rune/src/runtime/range_to_inclusive.rs index 71acbb9d3..654af1914 100644 --- a/crates/rune/src/runtime/range_to_inclusive.rs +++ b/crates/rune/src/runtime/range_to_inclusive.rs @@ -39,7 +39,7 @@ use crate::Any; /// # Ok::<_, rune::Error>(()) /// ``` #[derive(Any, Clone)] -#[rune(builtin, constructor)] +#[rune(builtin, constructor, from_value = Value::into_range_to_inclusive, static_type = RANGE_TO_INCLUSIVE_TYPE)] pub struct RangeToInclusive { /// The end value of the range. #[rune(get, set)] @@ -206,5 +206,3 @@ where VmResult::Ok(ops::RangeToInclusive { end }) } } - -from_value!(RangeToInclusive, into_range_to_inclusive); diff --git a/crates/rune/src/runtime/shared.rs b/crates/rune/src/runtime/shared.rs index fe0c76365..e36d9ce4a 100644 --- a/crates/rune/src/runtime/shared.rs +++ b/crates/rune/src/runtime/shared.rs @@ -1064,13 +1064,13 @@ impl Ref { /// The returned pointer must not outlive the associated guard, since this /// prevents other uses of the underlying data which is incompatible with /// the current. - pub fn into_raw(this: Self) -> (*const T, RawRef) { + pub fn into_raw(this: Self) -> (ptr::NonNull, RawRef) { let guard = RawRef { _guard: this.guard, _inner: this.inner, }; - (this.data.as_ptr(), guard) + (this.data, guard) } } @@ -1207,13 +1207,13 @@ impl Mut { /// The returned pointer must not outlive the associated guard, since this /// prevents other uses of the underlying data which is incompatible with /// the current. 
- pub fn into_raw(this: Self) -> (*mut T, RawMut) { + pub fn into_raw(this: Self) -> (ptr::NonNull, RawMut) { let guard = RawMut { _guard: this.guard, _inner: this.inner, }; - (this.data.as_ptr(), guard) + (this.data, guard) } } diff --git a/crates/rune/src/runtime/static_type.rs b/crates/rune/src/runtime/static_type.rs index a2e3f7c06..36222fa07 100644 --- a/crates/rune/src/runtime/static_type.rs +++ b/crates/rune/src/runtime/static_type.rs @@ -114,7 +114,6 @@ pub(crate) static BYTES_TYPE: &StaticType = &StaticType { hash: ::rune_macros::hash!(::std::bytes::Bytes), }; -impl_static_type!(rt::Bytes => BYTES_TYPE); impl_static_type!([u8] => BYTES_TYPE); pub(crate) static VEC_TYPE: &StaticType = &StaticType { @@ -122,9 +121,8 @@ pub(crate) static VEC_TYPE: &StaticType = &StaticType { hash: ::rune_macros::hash!(::std::vec::Vec), }; -impl_static_type!(rt::Vec => VEC_TYPE); -impl_static_type!(impl vec::Vec => VEC_TYPE); impl_static_type!([rt::Value] => VEC_TYPE); +impl_static_type!(impl vec::Vec => VEC_TYPE); impl_static_type!(impl rt::VecTuple => VEC_TYPE); pub(crate) static TUPLE_TYPE: &StaticType = &StaticType { @@ -133,64 +131,50 @@ pub(crate) static TUPLE_TYPE: &StaticType = &StaticType { }; impl_static_type!(rt::OwnedTuple => TUPLE_TYPE); -impl_static_type!(rt::Tuple => TUPLE_TYPE); pub(crate) static OBJECT_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Object"), hash: ::rune_macros::hash!(::std::object::Object), }; -impl_static_type!(rt::Object => OBJECT_TYPE); impl_static_type!(rt::Struct => OBJECT_TYPE); +impl_static_type!(impl HashMap => OBJECT_TYPE); pub(crate) static RANGE_FROM_TYPE: &StaticType = &StaticType { name: RawStr::from_str("RangeFrom"), hash: ::rune_macros::hash!(::std::ops::RangeFrom), }; -impl_static_type!(rt::RangeFrom => RANGE_FROM_TYPE); - pub(crate) static RANGE_FULL_TYPE: &StaticType = &StaticType { name: RawStr::from_str("RangeFull"), hash: ::rune_macros::hash!(::std::ops::RangeFull), }; -impl_static_type!(rt::RangeFull => RANGE_FULL_TYPE); - pub(crate) static RANGE_INCLUSIVE_TYPE: &StaticType = &StaticType { name: RawStr::from_str("RangeInclusive"), hash: ::rune_macros::hash!(::std::ops::RangeInclusive), }; -impl_static_type!(rt::RangeInclusive => RANGE_INCLUSIVE_TYPE); - pub(crate) static RANGE_TO_INCLUSIVE_TYPE: &StaticType = &StaticType { name: RawStr::from_str("RangeToInclusive"), hash: ::rune_macros::hash!(::std::ops::RangeToInclusive), }; -impl_static_type!(rt::RangeToInclusive => RANGE_TO_INCLUSIVE_TYPE); - pub(crate) static RANGE_TO_TYPE: &StaticType = &StaticType { name: RawStr::from_str("RangeTo"), hash: ::rune_macros::hash!(::std::ops::RangeTo), }; -impl_static_type!(rt::RangeTo => RANGE_TO_TYPE); - pub(crate) static RANGE_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Range"), hash: ::rune_macros::hash!(::std::ops::Range), }; -impl_static_type!(rt::Range => RANGE_TYPE); - pub(crate) static CONTROL_FLOW_TYPE: &StaticType = &StaticType { name: RawStr::from_str("ControlFlow"), hash: ::rune_macros::hash!(::std::ops::ControlFlow), }; -impl_static_type!(rt::ControlFlow => CONTROL_FLOW_TYPE); impl_static_type!(impl ControlFlow => CONTROL_FLOW_TYPE); pub(crate) static FUTURE_TYPE: &StaticType = &StaticType { @@ -198,29 +182,21 @@ pub(crate) static FUTURE_TYPE: &StaticType = &StaticType { hash: ::rune_macros::hash!(::std::future::Future), }; -impl_static_type!(rt::Future => FUTURE_TYPE); - pub(crate) static GENERATOR_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Generator"), hash: ::rune_macros::hash!(::std::ops::Generator), }; 
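
The `Ref::into_raw` and `Mut::into_raw` changes in `shared.rs` above move from bare `*const T` / `*mut T` to `ptr::NonNull<T>`, and callers now go through `as_ref()` / `as_mut()` instead of dereferencing raw pointers. A minimal, self-contained sketch of that shape (the `RawGuard` type and `into_raw` function here are illustrative stand-ins, not Rune's actual API):

```rust
use core::ptr::NonNull;

/// Keeps the allocation alive for as long as the raw pointer is in use.
struct RawGuard {
    _keep_alive: Box<String>,
}

/// Hand back a `NonNull` pointer plus the guard that owns the allocation,
/// mirroring the shape of the new `into_raw` signatures in this patch.
fn into_raw(value: Box<String>) -> (NonNull<String>, RawGuard) {
    let ptr = NonNull::from(&*value);
    (ptr, RawGuard { _keep_alive: value })
}

fn main() {
    let (ptr, guard) = into_raw(Box::new(String::from("hello")));
    // SAFETY: `guard` keeps the allocation alive, and we only read through
    // `ptr` while the guard is still in scope.
    let s: &String = unsafe { ptr.as_ref() };
    assert_eq!(s, "hello");
    drop(guard);
}
```

The benefit over a raw pointer is that `NonNull` encodes the non-null invariant in the type, so the unsafe dereference sites become the only places that need justification.
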
-impl_static_type!(rt::Generator => GENERATOR_TYPE); - pub(crate) static GENERATOR_STATE_TYPE: &StaticType = &StaticType { name: RawStr::from_str("GeneratorState"), hash: ::rune_macros::hash!(::std::ops::GeneratorState), }; -impl_static_type!(rt::GeneratorState => GENERATOR_STATE_TYPE); - pub(crate) static STREAM_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Stream"), hash: ::rune_macros::hash!(::std::stream::Stream), }; -impl_static_type!(rt::Stream => STREAM_TYPE); - pub(crate) static RESULT_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Result"), hash: ::rune_macros::hash!(::std::result::Result), @@ -240,22 +216,22 @@ pub(crate) static FUNCTION_TYPE: &StaticType = &StaticType { hash: ::rune_macros::hash!(::std::ops::Function), }; -impl_static_type!(rt::Function => FUNCTION_TYPE); -impl_static_type!(impl HashMap => OBJECT_TYPE); - pub(crate) static FORMAT_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Format"), hash: ::rune_macros::hash!(::std::fmt::Format), }; -impl_static_type!(rt::Format => FORMAT_TYPE); - pub(crate) static ITERATOR_TYPE: &StaticType = &StaticType { name: RawStr::from_str("Iterator"), hash: ::rune_macros::hash!(::std::iter::Iterator), }; -impl_static_type!(rt::Iterator => ITERATOR_TYPE); +pub(crate) static ORDERING_TYPE: &StaticType = &StaticType { + name: RawStr::from_str("Ordering"), + hash: ::rune_macros::hash!(::std::cmp::Ordering), +}; + +impl_static_type!(Ordering => ORDERING_TYPE); pub(crate) static TYPE: &StaticType = &StaticType { name: RawStr::from_str("Type"), @@ -263,10 +239,3 @@ pub(crate) static TYPE: &StaticType = &StaticType { }; impl_static_type!(rt::Type => TYPE); - -pub(crate) static ORDERING: &StaticType = &StaticType { - name: RawStr::from_str("Ordering"), - hash: ::rune_macros::hash!(::std::cmp::Ordering), -}; - -impl_static_type!(Ordering => ORDERING); diff --git a/crates/rune/src/runtime/stream.rs b/crates/rune/src/runtime/stream.rs index 4c5f33c93..f8c8787a8 100644 --- a/crates/rune/src/runtime/stream.rs +++ b/crates/rune/src/runtime/stream.rs @@ -1,12 +1,12 @@ use core::fmt; -use crate::compile::Named; -use crate::module::InstallWith; -use crate::runtime::{ - GeneratorState, RawStr, Shared, Value, Vm, VmErrorKind, VmExecution, VmResult, -}; +use crate as rune; +use crate::runtime::{GeneratorState, Shared, Value, Vm, VmErrorKind, VmExecution, VmResult}; +use crate::Any; /// A stream with a stored virtual machine. 
+#[derive(Any)] +#[rune(builtin, static_type = STREAM_TYPE, from_value = Value::into_stream, from_value_params = [Vm])] pub struct Stream where T: AsRef + AsMut, @@ -81,15 +81,6 @@ impl Stream<&mut Vm> { } } -impl Named for Stream -where - T: AsRef + AsMut, -{ - const BASE_NAME: RawStr = RawStr::from_str("Stream"); -} - -impl InstallWith for Stream where T: AsRef + AsMut {} - impl fmt::Debug for Stream where T: AsRef + AsMut, @@ -100,5 +91,3 @@ where .finish() } } - -from_value!(Stream, into_stream); diff --git a/crates/rune/src/runtime/tuple.rs b/crates/rune/src/runtime/tuple.rs index 823f84a3d..401e54486 100644 --- a/crates/rune/src/runtime/tuple.rs +++ b/crates/rune/src/runtime/tuple.rs @@ -4,14 +4,18 @@ use core::slice; use crate::no_std::prelude::*; -use crate::compile::Named; -use crate::module::InstallWith; +use crate as rune; use crate::runtime::{ - ConstValue, FromValue, Mut, RawMut, RawRef, RawStr, Ref, Shared, ToValue, UnsafeToMut, - UnsafeToRef, Value, VmErrorKind, VmResult, + ConstValue, FromValue, Mut, RawMut, RawRef, Ref, Shared, ToValue, UnsafeToMut, UnsafeToRef, + Value, VmErrorKind, VmResult, }; +#[cfg(feature = "std")] +use crate::runtime::{Hasher, ProtocolCaller}; +use crate::Any; /// The type of a tuple slice. +#[derive(Any)] +#[rune(builtin, static_type = TUPLE_TYPE)] #[repr(transparent)] pub struct Tuple { values: [Value], @@ -42,6 +46,19 @@ impl Tuple { VmResult::Ok(Some(vm_try!(T::from_value(value)))) } + + #[cfg(feature = "std")] + pub(crate) fn hash_with( + &self, + hasher: &mut Hasher, + caller: &mut impl ProtocolCaller, + ) -> VmResult<()> { + for value in self.values.iter() { + vm_try!(value.hash_with(hasher, caller)); + } + + VmResult::Ok(()) + } } impl ops::Deref for Tuple { @@ -80,12 +97,6 @@ impl<'a> IntoIterator for &'a mut Tuple { } } -impl Named for Tuple { - const BASE_NAME: RawStr = RawStr::from_str("Tuple"); -} - -impl InstallWith for Tuple {} - /// Struct representing a dynamic anonymous object. /// /// To access borrowed values of a tuple in native functions, use [`Tuple`]. 
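
`Tuple::hash_with` above (and the matching `Vec::hash_with` later in this patch) simply feeds every element into the same `Hasher`, in order. A standalone sketch of that composition using the standard library's `DefaultHasher` (illustrative only; Rune's `Hasher` wraps the same type but exposes its own `write_*` methods):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

/// Fold a sequence of integers into one hasher, element by element, the same
/// way `Tuple::hash_with` folds its values.
fn hash_elements(values: &[i64]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for value in values {
        hasher.write_i64(*value);
    }
    hasher.finish()
}

fn main() {
    // Equal sequences hash equal because every element is written in order.
    assert_eq!(hash_elements(&[1, 2, 3]), hash_elements(&[1, 2, 3]));
    // A different order produces a different hash (with overwhelming probability).
    assert_ne!(hash_elements(&[1, 2, 3]), hash_elements(&[3, 2, 1]));
}
```

Because each element is written through the shared hasher rather than hashed separately and combined, composite values such as tuples and vectors can participate in the same `Protocol::HASH` machinery as scalar values.
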
@@ -318,7 +329,7 @@ impl UnsafeToRef for Tuple { Value::Tuple(tuple) => { let tuple = Ref::map(vm_try!(tuple.into_ref()), |tuple| &**tuple); let (value, guard) = Ref::into_raw(tuple); - VmResult::Ok((&*value, Some(guard))) + VmResult::Ok((value.as_ref(), Some(guard))) } actual => VmResult::err(VmErrorKind::expected::(vm_try!(actual.type_info()))), } @@ -333,8 +344,8 @@ impl UnsafeToMut for Tuple { Value::EmptyTuple => VmResult::Ok((Tuple::new_mut(&mut []), None)), Value::Tuple(tuple) => { let tuple = Mut::map(vm_try!(tuple.into_mut()), |tuple| &mut **tuple); - let (value, guard) = Mut::into_raw(tuple); - VmResult::Ok((&mut *value, Some(guard))) + let (mut value, guard) = Mut::into_raw(tuple); + VmResult::Ok((value.as_mut(), Some(guard))) } actual => VmResult::err(VmErrorKind::expected::(vm_try!(actual.type_info()))), } diff --git a/crates/rune/src/runtime/type_info.rs b/crates/rune/src/runtime/type_info.rs index 7e7abd42a..8753564ae 100644 --- a/crates/rune/src/runtime/type_info.rs +++ b/crates/rune/src/runtime/type_info.rs @@ -20,6 +20,7 @@ pub enum TypeInfo { } impl TypeInfo { + #[cfg(feature = "emit")] pub(crate) fn type_hash(&self) -> Hash { match self { TypeInfo::StaticType(ty) => ty.hash, diff --git a/crates/rune/src/runtime/value.rs b/crates/rune/src/runtime/value.rs index 5d56311f2..1b1df07c6 100644 --- a/crates/rune/src/runtime/value.rs +++ b/crates/rune/src/runtime/value.rs @@ -4,6 +4,7 @@ use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; use core::fmt; use core::fmt::Write; use core::hash; +use core::ptr; use crate::no_std::prelude::*; use crate::no_std::sync::Arc; @@ -18,6 +19,8 @@ use crate::runtime::{ RangeTo, RangeToInclusive, RawMut, RawRef, Ref, Shared, Stream, ToValue, Type, TypeInfo, Variant, Vec, Vm, VmError, VmErrorKind, VmIntegerRepr, VmResult, }; +#[cfg(feature = "std")] +use crate::runtime::{Hasher, Tuple}; use crate::{Any, Hash}; use serde::{de, ser, Deserialize, Serialize}; @@ -1049,7 +1052,7 @@ impl Value { /// outlive the returned guard, not the virtual machine the value belongs /// to. #[inline] - pub fn into_any_ptr(self) -> VmResult<(*const T, RawRef)> + pub fn into_any_ptr(self) -> VmResult<(ptr::NonNull, RawRef)> where T: Any, { @@ -1073,7 +1076,7 @@ impl Value { /// outlive the returned guard, not the virtual machine the value belongs /// to. #[inline] - pub fn into_any_mut(self) -> VmResult<(*mut T, RawMut)> + pub fn into_any_mut(self) -> VmResult<(ptr::NonNull, RawMut)> where T: Any, { @@ -1099,7 +1102,7 @@ impl Value { Self::Integer(..) => crate::runtime::static_type::INTEGER_TYPE.hash, Self::Float(..) => crate::runtime::static_type::FLOAT_TYPE.hash, Self::Type(..) => crate::runtime::static_type::TYPE.hash, - Self::Ordering(..) => crate::runtime::static_type::ORDERING.hash, + Self::Ordering(..) => crate::runtime::static_type::ORDERING_TYPE.hash, Self::String(..) => crate::runtime::static_type::STRING_TYPE.hash, Self::Bytes(..) => crate::runtime::static_type::BYTES_TYPE.hash, Self::Vec(..) => crate::runtime::static_type::VEC_TYPE.hash, @@ -1139,7 +1142,7 @@ impl Value { Self::Integer(..) => TypeInfo::StaticType(crate::runtime::static_type::INTEGER_TYPE), Self::Float(..) => TypeInfo::StaticType(crate::runtime::static_type::FLOAT_TYPE), Self::Type(..) => TypeInfo::StaticType(crate::runtime::static_type::TYPE), - Self::Ordering(..) => TypeInfo::StaticType(crate::runtime::static_type::ORDERING), + Self::Ordering(..) => TypeInfo::StaticType(crate::runtime::static_type::ORDERING_TYPE), Self::String(..) 
=> TypeInfo::StaticType(crate::runtime::static_type::STRING_TYPE), Self::Bytes(..) => TypeInfo::StaticType(crate::runtime::static_type::BYTES_TYPE), Self::Vec(..) => TypeInfo::StaticType(crate::runtime::static_type::VEC_TYPE), @@ -1336,6 +1339,73 @@ impl Value { }) } + /// Hash the current value. + #[cfg(feature = "std")] + pub fn hash(&self, hasher: &mut Hasher) -> VmResult<()> { + self.hash_with(hasher, &mut EnvProtocolCaller) + } + + /// Hash the current value. + #[cfg(feature = "std")] + pub(crate) fn hash_with( + &self, + hasher: &mut Hasher, + caller: &mut impl ProtocolCaller, + ) -> VmResult<()> { + match self { + Value::Integer(value) => { + hasher.write_i64(*value); + return VmResult::Ok(()); + } + Value::Byte(value) => { + hasher.write_u8(*value); + return VmResult::Ok(()); + } + // Care must be taken whan hashing floats, to ensure that `hash(v1) + // === hash(v2)` if `eq(v1) === eq(v2)`. Hopefully we accomplish + // this by rejecting NaNs and rectifying subnormal values of zero. + Value::Float(value) => { + if value.is_nan() { + return VmResult::err(VmErrorKind::IllegalFloatOperation { value: *value }); + } + + let zero = *value == 0.0; + hasher.write_f64((zero as u8 as f64) * 0.0 + (!zero as u8 as f64) * *value); + return VmResult::Ok(()); + } + Value::String(string) => { + let string = vm_try!(string.borrow_ref()); + hasher.write_str(&string); + return VmResult::Ok(()); + } + Value::Bytes(bytes) => { + let bytes = vm_try!(bytes.borrow_ref()); + hasher.write(&bytes); + return VmResult::Ok(()); + } + Value::Tuple(tuple) => { + let tuple = vm_try!(tuple.borrow_ref()); + return Tuple::hash_with(&tuple, hasher, caller); + } + Value::Vec(vec) => { + let vec = vm_try!(vec.borrow_ref()); + return Vec::hash_with(&vec, hasher, caller); + } + value => { + match vm_try!(caller.try_call_protocol_fn(Protocol::HASH, value.clone(), (hasher,))) + { + CallResult::Ok(value) => return <()>::from_value(value), + CallResult::Unsupported(..) => {} + } + } + } + + err(VmErrorKind::UnsupportedUnaryOperation { + op: "hash", + operand: vm_try!(self.type_info()), + }) + } + /// Perform a total equality test between two values. /// /// This is the basis for the eq operation (`==`). @@ -1346,19 +1416,15 @@ impl Value { /// # Errors /// /// This function will error if called outside of a virtual machine context. - pub fn eq(a: &Value, b: &Value) -> VmResult { - Value::eq_with(a, b, &mut EnvProtocolCaller) + pub fn eq(&self, b: &Value) -> VmResult { + self.eq_with(b, &mut EnvProtocolCaller) } /// Perform a total equality test between two values. /// /// This is the basis for the eq operation (`==`). - pub(crate) fn eq_with( - a: &Value, - b: &Value, - caller: &mut impl ProtocolCaller, - ) -> VmResult { - match (a, b) { + pub(crate) fn eq_with(&self, b: &Value, caller: &mut impl ProtocolCaller) -> VmResult { + match (self, b) { (Self::Bool(a), Self::Bool(b)) => return VmResult::Ok(a == b), (Self::Byte(a), Self::Byte(b)) => return VmResult::Ok(a == b), (Self::Char(a), Self::Char(b)) => return VmResult::Ok(a == b), @@ -1478,8 +1544,9 @@ impl Value { _ => return VmResult::Ok(false), } } - (a, b) => { - match vm_try!(caller.try_call_protocol_fn(Protocol::EQ, a.clone(), (b.clone(),))) { + _ => { + match vm_try!(caller.try_call_protocol_fn(Protocol::EQ, self.clone(), (b.clone(),))) + { CallResult::Ok(value) => return bool::from_value(value), CallResult::Unsupported(..) 
=> {} } @@ -1488,7 +1555,7 @@ impl Value { err(VmErrorKind::UnsupportedBinaryOperation { op: "eq", - lhs: vm_try!(a.type_info()), + lhs: vm_try!(self.type_info()), rhs: vm_try!(b.type_info()), }) } diff --git a/crates/rune/src/runtime/vec.rs b/crates/rune/src/runtime/vec.rs index 95ba5fe0e..50289a35a 100644 --- a/crates/rune/src/runtime/vec.rs +++ b/crates/rune/src/runtime/vec.rs @@ -10,12 +10,14 @@ use core::slice::SliceIndex; use crate::no_std::prelude::*; use crate::no_std::vec; -use crate::compile::Named; -use crate::module::InstallWith; +use crate as rune; +#[cfg(feature = "std")] +use crate::runtime::Hasher; use crate::runtime::{ - Formatter, FromValue, Iterator, ProtocolCaller, RawRef, RawStr, Ref, Shared, ToValue, - UnsafeToRef, Value, VmErrorKind, VmResult, + Formatter, FromValue, Iterator, ProtocolCaller, RawRef, Ref, Shared, ToValue, UnsafeToRef, + Value, VmErrorKind, VmResult, }; +use crate::Any; use self::iter::Iter; @@ -36,8 +38,9 @@ use self::iter::Iter; /// assert_eq!(None::, vec.get_value(2).into_result()?); /// # Ok::<_, rune::Error>(()) /// ``` -#[derive(Clone)] +#[derive(Clone, Any)] #[repr(transparent)] +#[rune(builtin, static_type = VEC_TYPE, from_value = Value::into_vec)] pub struct Vec { inner: vec::Vec, } @@ -378,6 +381,19 @@ impl Vec { VmResult::Ok(Some(Value::vec(values.to_vec()))) } + + #[cfg(feature = "std")] + pub(crate) fn hash_with( + &self, + hasher: &mut Hasher, + caller: &mut impl ProtocolCaller, + ) -> VmResult<()> { + for value in self.inner.iter() { + vm_try!(value.hash_with(hasher, caller)); + } + + VmResult::Ok(()) + } } impl fmt::Debug for Vec { @@ -448,14 +464,6 @@ impl From> for Vec { } } -impl Named for Vec { - const BASE_NAME: RawStr = RawStr::from_str("Vec"); -} - -impl InstallWith for Vec {} - -from_value!(Vec, into_vec); - impl FromValue for vec::Vec where T: FromValue, @@ -480,9 +488,9 @@ impl UnsafeToRef for [Value] { unsafe fn unsafe_to_ref<'a>(value: Value) -> VmResult<(&'a Self, Self::Guard)> { let vec = vm_try!(value.into_vec()); let (vec, guard) = Ref::into_raw(vm_try!(vec.into_ref())); - // Safety: we're holding onto the guard for the vector here, so it is + // SAFETY: we're holding onto the guard for the vector here, so it is // live. - VmResult::Ok(((*vec).as_slice(), guard)) + VmResult::Ok((vec.as_ref().as_slice(), guard)) } } diff --git a/crates/rune/src/runtime/vm_error.rs b/crates/rune/src/runtime/vm_error.rs index 8a91a1120..1322c2687 100644 --- a/crates/rune/src/runtime/vm_error.rs +++ b/crates/rune/src/runtime/vm_error.rs @@ -7,8 +7,8 @@ use crate::compile::ItemBuf; use crate::hash::Hash; use crate::runtime::unit::{BadInstruction, BadJump}; use crate::runtime::{ - AccessError, BoxedPanic, CallFrame, ExecutionState, FullTypeOf, Key, MaybeTypeOf, Panic, - StackError, TypeInfo, TypeOf, Unit, Vm, VmHaltInfo, + AccessError, BoxedPanic, CallFrame, ExecutionState, FullTypeOf, MaybeTypeOf, Panic, StackError, + TypeInfo, TypeOf, Unit, Vm, VmHaltInfo, }; /// Trait used to convert result types to [`VmResult`]. @@ -85,6 +85,7 @@ pub struct VmErrorLocation { #[non_exhaustive] pub struct VmErrorAt { /// Index into the backtrace which contains information of what caused this error. + #[cfg(feature = "emit")] index: usize, /// The kind of error. kind: VmErrorKind, @@ -92,6 +93,7 @@ pub struct VmErrorAt { impl VmErrorAt { /// Get the instruction which caused the error. 
+ #[cfg(feature = "emit")] pub(crate) fn index(&self) -> usize { self.index } @@ -255,9 +257,11 @@ impl VmResult { match self { Self::Ok(ok) => Self::Ok(ok), Self::Err(mut err) => { + #[cfg(feature = "emit")] let index = err.inner.stacktrace.len(); err.inner.chain.push(VmErrorAt { + #[cfg(feature = "emit")] index, kind: VmErrorKind::from(error()), }); @@ -362,6 +366,7 @@ where Self { inner: Box::new(VmErrorInner { error: VmErrorAt { + #[cfg(feature = "emit")] index: 0, kind: VmErrorKind::from(error), }, @@ -386,12 +391,17 @@ impl From<[VmErrorKind; N]> for VmError { let mut chain = Vec::with_capacity(it.len()); for kind in it { - chain.push(VmErrorAt { index: 0, kind }); + chain.push(VmErrorAt { + #[cfg(feature = "emit")] + index: 0, + kind, + }); } Self { inner: Box::new(VmErrorInner { error: VmErrorAt { + #[cfg(feature = "emit")] index: 0, kind: first, }, @@ -529,9 +539,9 @@ pub(crate) enum VmErrorKind { target: TypeInfo, index: VmIntegerRepr, }, + #[cfg(feature = "std")] MissingIndexKey { target: TypeInfo, - index: Key, }, OutOfRange { index: VmIntegerRepr, @@ -613,6 +623,10 @@ pub(crate) enum VmErrorKind { lhs: f64, rhs: f64, }, + #[cfg(feature = "std")] + IllegalFloatOperation { + value: f64, + }, MissingCallFrame, } @@ -656,10 +670,13 @@ impl fmt::Display for VmErrorKind { "Instruction pointer `{ip}` is out-of-bounds `0-{length}`", ), VmErrorKind::UnsupportedBinaryOperation { op, lhs, rhs } => { - write!(f, "Unsupported operation `{lhs} {op} {rhs}`",) + write!( + f, + "Unsupported binary operation `{op}` on `{lhs}` and `{rhs}`", + ) } VmErrorKind::UnsupportedUnaryOperation { op, operand } => { - write!(f, "Unsupported operation `{op}{operand}`",) + write!(f, "Unsupported unary operation `{op}` on {operand}",) } VmErrorKind::MissingStaticString { slot } => { write!(f, "Static string slot `{slot}` does not exist",) @@ -722,10 +739,11 @@ impl fmt::Display for VmErrorKind { write!(f, "Type `{target}` missing index",) } VmErrorKind::MissingIndexInteger { target, index } => { - write!(f, "Type `{target}` missing index `{index}`",) + write!(f, "Type `{target}` missing integer index `{index}`",) } - VmErrorKind::MissingIndexKey { target, index } => { - write!(f, "Type `{target}` missing index `{index:?}`",) + #[cfg(feature = "std")] + VmErrorKind::MissingIndexKey { target } => { + write!(f, "Type `{target}` missing index",) } VmErrorKind::OutOfRange { index, length } => write!( f, @@ -806,6 +824,10 @@ impl fmt::Display for VmErrorKind { "Cannot perform a comparison of the floats {lhs} and {rhs}", ) } + #[cfg(feature = "std")] + VmErrorKind::IllegalFloatOperation { value } => { + write!(f, "Cannot perform operation on float `{value}`",) + } VmErrorKind::MissingCallFrame => { write!(f, "Missing call frame for internal vm call") } diff --git a/crates/rune/src/tests.rs b/crates/rune/src/tests.rs index 8cfd5694c..eb41f7eea 100644 --- a/crates/rune/src/tests.rs +++ b/crates/rune/src/tests.rs @@ -89,7 +89,7 @@ pub fn vm( .expect("Emit diagnostics"); let buffer = String::from_utf8(buffer.into_inner()).expect("Non utf-8 output"); - return Err(RunError::BuildError(buffer)) + return Err(RunError::BuildError(buffer)); }; let context = Arc::new(context.runtime()); diff --git a/crates/rune/src/tests/bug_344.rs b/crates/rune/src/tests/bug_344.rs index 4cdfa2c55..3701d5da3 100644 --- a/crates/rune/src/tests/bug_344.rs +++ b/crates/rune/src/tests/bug_344.rs @@ -217,12 +217,12 @@ impl UnsafeToRef for GuardCheck { let guard = Guard { _guard: guard, - // Safety: regardless of what happens, the value is 
available here - // and the refcounted value will be available even if the underlying - // value *is* dropped prematurely because it's been cloned. - dropped: unsafe { (*output).dropped.clone() }, + // Regardless of what happens, the value is available here and the + // refcounted value will be available even if the underlying value + // *is* dropped prematurely because it's been cloned. + dropped: output.as_ref().dropped.clone(), }; - VmResult::Ok((&*output, guard)) + VmResult::Ok((output.as_ref(), guard)) } } diff --git a/crates/rune/src/tests/compiler_general.rs b/crates/rune/src/tests/compiler_general.rs index 13b6ca1d7..6fef01e0b 100644 --- a/crates/rune/src/tests/compiler_general.rs +++ b/crates/rune/src/tests/compiler_general.rs @@ -38,8 +38,8 @@ fn test_pointers() { #[test] fn test_template_strings() { - assert_parse!(r#"pub fn main() { `hello \`` }"#); - assert_parse!(r#"pub fn main() { `hello \$` }"#); + assert_parse!(r"pub fn main() { `hello \`` }"); + assert_parse!(r"pub fn main() { `hello \$` }"); } #[test] diff --git a/tools/import_hashbrown.ps1 b/tools/import_hashbrown.ps1 new file mode 100644 index 000000000..1180c3afa --- /dev/null +++ b/tools/import_hashbrown.ps1 @@ -0,0 +1,8 @@ +$Path = "D:\Repo\hashbrown" +Copy-Item $Path\src\raw\ -Destination crates\rune\src\hashbrown\fork\ -Recurse -Force +Copy-Item $Path\src\scopeguard.rs -Destination crates\rune\src\hashbrown\fork\scopeguard.rs -Force +Copy-Item $Path\src\macros.rs -Destination crates\rune\src\hashbrown\fork\macros.rs -Force + +$template = Get-Content -Path crates\rune\src\hashbrown\fork\raw\mod.rs -Encoding UTF8 -Raw +$template = $template -replace 'crate::(?!alloc)', 'crate::hashbrown::fork::' +Set-Content -Path crates\rune\src\hashbrown\fork\raw\mod.rs -Value $template -Encoding UTF8
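
Earlier in this patch, `Value::hash_with` rejects NaN and normalizes signed zero before hashing, preserving the invariant that `v1 == v2` implies `hash(v1) == hash(v2)`. A standalone sketch of why that normalization matters (the `normalize` helper below mirrors the expression used in `hash_with`, but is purely illustrative):

```rust
/// Normalize a float the same way `Value::hash_with` does before hashing:
/// collapse `-0.0` into `+0.0` so equal floats produce equal bit patterns.
fn normalize(value: f64) -> f64 {
    let zero = value == 0.0;
    (zero as u8 as f64) * 0.0 + (!zero as u8 as f64) * value
}

fn main() {
    // `-0.0` and `0.0` compare equal but have different bit patterns...
    assert!(-0.0_f64 == 0.0_f64);
    assert_ne!((-0.0_f64).to_bits(), 0.0_f64.to_bits());

    // ...so hashing the raw bits would break `v1 == v2 => hash(v1) == hash(v2)`.
    // After normalization the bit patterns agree again.
    assert_eq!(normalize(-0.0).to_bits(), normalize(0.0).to_bits());
    assert_eq!(normalize(1.5).to_bits(), 1.5_f64.to_bits());

    // NaN never compares equal to anything, which is why the patch reports
    // `IllegalFloatOperation` instead of hashing it.
    assert!(f64::NAN != f64::NAN);
}
```

Without this step, a hash map keyed by floats could hold `0.0` yet fail a lookup with `-0.0`, even though the two keys compare equal.
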