diff --git a/Cargo.lock.msrv b/Cargo.lock.msrv index c464aa689..1e4bce45a 100644 --- a/Cargo.lock.msrv +++ b/Cargo.lock.msrv @@ -1531,6 +1531,7 @@ dependencies = [ "bytes", "chrono", "criterion", + "lazy_static", "lz4_flex", "num-bigint 0.3.3", "num-bigint 0.4.4", @@ -1538,10 +1539,12 @@ dependencies = [ "secrecy", "serde", "snap", + "stable_deref_trait", "thiserror", "time", "tokio", "uuid", + "yoke", ] [[package]] @@ -1677,6 +1680,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -1729,6 +1738,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "termcolor" version = "1.2.0" @@ -2325,6 +2345,30 @@ dependencies = [ "memchr", ] +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.32" @@ -2345,6 +2389,27 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", + "synstructure", +] + [[package]] name = "zeroize" version = "1.6.0" diff --git a/examples/cqlsh-rs.rs b/examples/cqlsh-rs.rs index 8d9ca8ea6..c12b17a76 100644 --- a/examples/cqlsh-rs.rs +++ b/examples/cqlsh-rs.rs @@ -4,7 +4,7 @@ use rustyline::error::ReadlineError; use rustyline::{CompletionType, Config, Context, Editor}; use rustyline_derive::{Helper, Highlighter, Hinter, Validator}; use scylla::transport::Compression; -use scylla::{QueryResult, Session, SessionBuilder}; +use scylla::{LegacyQueryResult, Session, SessionBuilder}; use std::env; #[derive(Helper, Highlighter, Validator, Hinter)] @@ -173,7 +173,7 @@ impl Completer for CqlHelper { } } -fn print_result(result: &QueryResult) { +fn print_result(result: &LegacyQueryResult) { if result.rows.is_none() { println!("OK"); return; diff --git a/examples/tower.rs b/examples/tower.rs index b45b08ae1..0d28407da 100644 --- a/examples/tower.rs +++ b/examples/tower.rs @@ -12,7 +12,7 @@ struct SessionService { // A trivial service implementation for sending parameterless simple string requests to Scylla. 
impl Service for SessionService { - type Response = scylla::QueryResult; + type Response = scylla::LegacyQueryResult; type Error = scylla::transport::errors::QueryError; type Future = Pin>>>; diff --git a/examples/tracing.rs b/examples/tracing.rs index d742de7e5..12767de5b 100644 --- a/examples/tracing.rs +++ b/examples/tracing.rs @@ -8,8 +8,8 @@ use scylla::statement::{ prepared_statement::PreparedStatement, query::Query, Consistency, SerialConsistency, }; use scylla::tracing::TracingInfo; -use scylla::transport::iterator::RowIterator; -use scylla::QueryResult; +use scylla::transport::iterator::LegacyRowIterator; +use scylla::LegacyQueryResult; use scylla::{Session, SessionBuilder}; use std::env; use std::num::NonZeroU32; @@ -42,7 +42,7 @@ async fn main() -> Result<()> { query.set_serial_consistency(Some(SerialConsistency::LocalSerial)); // QueryResult will contain a tracing_id which can be used to query tracing information - let query_result: QueryResult = session.query_unpaged(query.clone(), &[]).await?; + let query_result: LegacyQueryResult = session.query_unpaged(query.clone(), &[]).await?; let query_tracing_id: Uuid = query_result .tracing_id .ok_or_else(|| anyhow!("Tracing id is None!"))?; @@ -79,14 +79,14 @@ async fn main() -> Result<()> { // To trace execution of a prepared statement tracing must be enabled for it prepared.set_tracing(true); - let execute_result: QueryResult = session.execute_unpaged(&prepared, &[]).await?; + let execute_result: LegacyQueryResult = session.execute_unpaged(&prepared, &[]).await?; println!("Execute tracing id: {:?}", execute_result.tracing_id); // PAGED QUERY_ITER EXECUTE_ITER // It's also possible to trace paged queries like query_iter or execute_iter // After iterating through all rows iterator.get_tracing_ids() will give tracing ids // for all page queries - let mut row_iterator: RowIterator = session.query_iter(query, &[]).await?; + let mut row_iterator: LegacyRowIterator = session.query_iter(query, &[]).await?; while let Some(_row) = row_iterator.next().await { // Receive rows @@ -105,7 +105,7 @@ async fn main() -> Result<()> { batch.set_tracing(true); // Run the batch and print its tracing_id - let batch_result: QueryResult = session.batch(&batch, ((),)).await?; + let batch_result: LegacyQueryResult = session.batch(&batch, ((),)).await?; println!("Batch tracing id: {:?}\n", batch_result.tracing_id); // CUSTOM diff --git a/scylla-cql/Cargo.toml b/scylla-cql/Cargo.toml index 499b1ddd4..fa0787f99 100644 --- a/scylla-cql/Cargo.toml +++ b/scylla-cql/Cargo.toml @@ -27,10 +27,13 @@ lz4_flex = { version = "0.11.1" } async-trait = "0.1.57" serde = { version = "1.0", features = ["derive"], optional = true } time-03 = { package = "time", version = "0.3", optional = true } +yoke = { version = "0.7", features = ["derive"] } +stable_deref_trait = "1.2" [dev-dependencies] assert_matches = "1.5.0" criterion = "0.4" # Note: v0.5 needs at least rust 1.70.0 +lazy_static = "1" # We can migrate to std::sync::LazyLock once MSRV is bumped to 1.80. 
# Use large-dates feature to test potential edge cases time-03 = { package = "time", version = "0.3.21", features = ["large-dates"] } uuid = { version = "1.0", features = ["v4"] } diff --git a/scylla-cql/src/frame/response/result.rs b/scylla-cql/src/frame/response/result.rs index 71d91db9a..3a351b434 100644 --- a/scylla-cql/src/frame/response/result.rs +++ b/scylla-cql/src/frame/response/result.rs @@ -10,13 +10,15 @@ use crate::frame::types; use crate::frame::value::{ Counter, CqlDate, CqlDecimal, CqlDuration, CqlTime, CqlTimestamp, CqlTimeuuid, CqlVarint, }; -use crate::types::deserialize::result::{RowIterator, TypedRowIterator}; +use crate::types::deserialize::result::{RawRowIterator, TypedRowIterator}; +use crate::types::deserialize::row::DeserializeRow; use crate::types::deserialize::value::{ mk_deser_err, BuiltinDeserializationErrorKind, DeserializeValue, MapIterator, UdtIterator, }; -use crate::types::deserialize::{DeserializationError, FrameSlice}; +use crate::types::deserialize::{DeserializationError, FrameSlice, TypeCheckError}; use bytes::{Buf, Bytes}; use std::borrow::Cow; +use std::fmt::Debug; use std::sync::Arc; use std::{net::IpAddr, result::Result as StdResult, str}; use uuid::Uuid; @@ -491,7 +493,7 @@ impl ColumnSpec<'static> { impl<'frame> ColumnSpec<'frame> { #[inline] - pub fn borrowed( + pub const fn borrowed( name: &'frame str, typ: ColumnType<'frame>, table_spec: TableSpec<'frame>, @@ -526,6 +528,18 @@ pub struct ResultMetadata<'a> { } impl<'a> ResultMetadata<'a> { + #[inline] + pub fn col_count(&self) -> usize { + self.col_count + } + + #[inline] + pub fn col_specs(&self) -> &[ColumnSpec<'a>] { + &self.col_specs + } + + // Preferred to implementing Default, because users shouldn't be encouraged to create + // empty ResultMetadata. #[inline] pub fn mock_empty() -> Self { Self { @@ -533,24 +547,36 @@ impl<'a> ResultMetadata<'a> { col_specs: Vec::new(), } } +} - #[inline] - #[doc(hidden)] - pub fn new_for_test(col_count: usize, col_specs: Vec>) -> Self { - Self { - col_count, - col_specs, - } - } +/// Versatile container for [ResultMetadata]. Allows 2 types of ownership +/// of `ResultMetadata`: +/// 1. owning it in a borrowed form, self-borrowed from the RESULT:Rows frame; +/// 2. sharing ownership of metadata cached in PreparedStatement. +#[derive(Debug)] +pub enum ResultMetadataHolder { + SelfBorrowed(SelfBorrowedMetadataContainer), + SharedCached(Arc>), +} +impl ResultMetadataHolder { + /// Returns reference to the stored [ResultMetadata]. + /// + /// Note that [ResultMetadataHolder] cannot implement [Deref](std::ops::Deref), + /// because `Deref` does not permit that `Deref::Target`'s lifetime depend on + /// lifetime of `&self`. #[inline] - pub fn col_count(&self) -> usize { - self.col_count + pub fn inner(&self) -> &ResultMetadata<'_> { + match self { + ResultMetadataHolder::SelfBorrowed(c) => c.metadata(), + ResultMetadataHolder::SharedCached(s) => s, + } } + /// Creates an empty [ResultMetadataHolder]. #[inline] - pub fn col_specs(&self) -> &[ColumnSpec<'a>] { - &self.col_specs + pub fn mock_empty() -> Self { + Self::SelfBorrowed(SelfBorrowedMetadataContainer::mock_empty()) } } @@ -584,20 +610,238 @@ impl Row { } } +/// RESULT:Rows response, in partially serialized form. +/// +/// Flags and paging state are deserialized, remaining part of metadata +/// as well as rows remain serialized. 
#[derive(Debug)] -pub struct Rows { - pub metadata: Arc>, - pub paging_state_response: PagingStateResponse, - pub rows_count: usize, - pub rows: Vec, - /// Original size of the serialized rows. - pub serialized_size: usize, +pub struct RawMetadataAndRawRows { + // Already deserialized part of metadata: + col_count: usize, + global_tables_spec: bool, + no_metadata: bool, + + /// The remaining part of the RESULT frame. + raw_metadata_and_rows: Bytes, + + /// Metadata cached in PreparedStatement, if present. + cached_metadata: Option>>, +} + +impl RawMetadataAndRawRows { + /// Creates an empty [RawMetadataAndRawRows]. + // Preferred to implementing Default, because users shouldn't be encouraged to create + // empty RawMetadataAndRawRows. + #[inline] + pub fn mock_empty() -> Self { + // Minimal correct `raw_metadata_and_rows` looks like this: + // Empty metadata (0 bytes), rows_count=0 (i32 big endian), empty rows (0 bytes). + static EMPTY_METADATA_ZERO_ROWS: &[u8] = &0_i32.to_be_bytes(); + let raw_metadata_and_rows = Bytes::from_static(EMPTY_METADATA_ZERO_ROWS); + + Self { + col_count: 0, + global_tables_spec: false, + no_metadata: false, + raw_metadata_and_rows, + cached_metadata: None, + } + } + + /// Returns the serialized size of the raw metadata + raw rows. + #[inline] + pub fn metadata_and_rows_bytes_size(&self) -> usize { + self.raw_metadata_and_rows.len() + } +} + +mod self_borrowed_metadata { + use std::ops::Deref; + + use bytes::Bytes; + use yoke::{Yoke, Yokeable}; + + use super::ResultMetadata; + + // A trivial wrapper over Bytes, introduced to circumvent the orphan rule. + // (neither `bytes` nor `stable_deref_trait` crate wants to implement + // `StableDeref` for `Bytes`, so we need a wrapper for that) + #[derive(Debug, Clone)] + struct BytesWrapper { + inner: Bytes, + } + + impl Deref for BytesWrapper { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.inner + } + } + + // SAFETY: + // StableDeref requires that a type dereferences to a stable address, even when moved. + // `Bytes` satisfy this requirement, because they dereference to their heap allocation. + unsafe impl stable_deref_trait::StableDeref for BytesWrapper {} + + // SAFETY: + // Citing `CloneableCart`'s docstring: + // > Safety + // > This trait is safe to implement on StableDeref types which, once Cloned, point to the same underlying data and retain ownership. + // + // `Bytes` satisfy this requirement. + unsafe impl yoke::CloneableCart for BytesWrapper {} + + // A trivial wrapper over [ResultMetadata], introduced to keep ResultMetadata free of Yoke. + // This way Yoke does not appear in any public types/APIs. + #[derive(Debug, Clone, Yokeable)] + struct ResultMetadataWrapper<'frame>(ResultMetadata<'frame>); + + /// A container that can be considered an `Arc` with an additional capability + /// of containing metadata in a borrowed form. + /// + /// The borrow comes from the `Bytes` that this container holds internally. Therefore, + /// the held `ResultMetadata`'s lifetime is covariant with the lifetime of this container + /// itself. + #[derive(Debug, Clone)] + pub struct SelfBorrowedMetadataContainer { + metadata_and_raw_rows: Yoke, BytesWrapper>, + } + + impl SelfBorrowedMetadataContainer { + /// Creates an empty [SelfBorrowedMetadataContainer]. 
+ pub fn mock_empty() -> Self { + Self { + metadata_and_raw_rows: Yoke::attach_to_cart( + BytesWrapper { + inner: Bytes::new(), + }, + |_| ResultMetadataWrapper(ResultMetadata::mock_empty()), + ), + } + } + + /// Returns a reference to the contained [ResultMetadata]. + pub fn metadata(&self) -> &ResultMetadata<'_> { + &self.metadata_and_raw_rows.get().0 + } + + // Returns Self (deserialized metadata) and the rest of the bytes, + // which contain rows count and then rows themselves. + pub(super) fn make_deserialized_metadata( + frame: Bytes, + deserializer: F, + ) -> Result<(Self, Bytes), ErrorT> + where + // This constraint is modelled after `Yoke::try_attach_to_cart`. + F: for<'frame> FnOnce(&mut &'frame [u8]) -> Result, ErrorT>, + { + let deserialized_metadata_and_raw_rows: Yoke< + (ResultMetadataWrapper<'static>, &'static [u8]), + BytesWrapper, + > = Yoke::try_attach_to_cart(BytesWrapper { inner: frame }, |mut slice| { + let metadata = deserializer(&mut slice)?; + let row_count_and_raw_rows = slice; + Ok((ResultMetadataWrapper(metadata), row_count_and_raw_rows)) + })?; + + let (_metadata, raw_rows) = deserialized_metadata_and_raw_rows.get(); + let raw_rows_with_count = deserialized_metadata_and_raw_rows + .backing_cart() + .inner + .slice_ref(raw_rows); + + Ok(( + Self { + metadata_and_raw_rows: deserialized_metadata_and_raw_rows + .map_project(|(metadata, _), _| metadata), + }, + raw_rows_with_count, + )) + } + } +} +pub use self_borrowed_metadata::SelfBorrowedMetadataContainer; + +/// RESULT:Rows response, in partially serialized form. +/// +/// Paging state and metadata are deserialized, rows remain serialized. +#[derive(Debug)] +pub struct DeserializedMetadataAndRawRows { + metadata: ResultMetadataHolder, + rows_count: usize, + raw_rows: Bytes, +} + +impl DeserializedMetadataAndRawRows { + /// Returns the metadata associated with this response + /// (table and column specifications). + #[inline] + pub fn metadata(&self) -> &ResultMetadata<'_> { + self.metadata.inner() + } + + /// Consumes the `DeserializedMetadataAndRawRows` and returns metadata + /// associated with the response (or cached metadata, if used in its stead). + #[inline] + pub fn into_metadata(self) -> ResultMetadataHolder { + self.metadata + } + + /// Returns the number of rows that the RESULT:Rows contain. + #[inline] + pub fn rows_count(&self) -> usize { + self.rows_count + } + + /// Returns the serialized size of the raw rows. + #[inline] + pub fn rows_bytes_size(&self) -> usize { + self.raw_rows.len() + } + + // Preferred to implementing Default, because users shouldn't be encouraged to create + // empty DeserializedMetadataAndRawRows. + #[inline] + pub fn mock_empty() -> Self { + Self { + metadata: ResultMetadataHolder::SelfBorrowed( + SelfBorrowedMetadataContainer::mock_empty(), + ), + rows_count: 0, + raw_rows: Bytes::new(), + } + } + + pub(crate) fn into_inner(self) -> (ResultMetadataHolder, usize, Bytes) { + (self.metadata, self.rows_count, self.raw_rows) + } + + /// Creates a typed iterator over the rows that lazily deserializes + /// rows in the result. + /// + /// Returns Err if the schema of returned result doesn't match R. 
+ #[inline] + pub fn rows_iter<'frame, 'metadata, R: DeserializeRow<'frame, 'metadata>>( + &'frame self, + ) -> StdResult, TypeCheckError> + where + 'frame: 'metadata, + { + let frame_slice = FrameSlice::new(&self.raw_rows); + let raw = RawRowIterator::new( + self.rows_count, + self.metadata.inner().col_specs(), + frame_slice, + ); + TypedRowIterator::new(raw) + } } #[derive(Debug)] pub enum Result { Void, - Rows(Rows), + Rows((RawMetadataAndRawRows, PagingStateResponse)), SetKeyspace(SetKeyspace), Prepared(Prepared), SchemaChange(SchemaChange), @@ -616,7 +860,7 @@ fn deser_type_generic<'frame, 'result, StrT: Into>>( // Chances are the underlying string is `...DurationType`, in which case // we don't need to allocate it at all. Only for Custom types // (which we don't support anyway) do we need to allocate. - // OTOH, the macro argument function deserializes borrowed OR owned string; + // OTOH, the provided `read_string` function deserializes borrowed OR owned string; // here we want to always deserialize borrowed string. let type_str = types::read_string(buf).map_err(CqlTypeParseError::CustomTypeNameParseError)?; @@ -692,7 +936,7 @@ fn deser_type_generic<'frame, 'result, StrT: Into>>( }) } -fn _deser_type_borrowed<'frame>( +fn deser_type_borrowed<'frame>( buf: &mut &'frame [u8], ) -> StdResult, CqlTypeParseError> { deser_type_generic(buf, |buf| types::read_string(buf)) @@ -780,15 +1024,6 @@ fn deser_table_spec_for_col_spec<'frame>( Ok(table_spec) } -/// Deserializes col specs (part of ResultMetadata or PreparedMetadata) -/// in the form mentioned by its name. -/// -/// Checks for equality of table specs across columns, because the protocol -/// does not guarantee that and we want to be sure that the assumption -/// of them being all the same is correct. -/// -/// To avoid needless allocations, it is advised to pass `global_table_spec` -/// in the borrowed form, so that cloning it is cheap. fn deser_col_specs_generic<'frame, 'result>( buf: &mut &'frame [u8], global_table_spec: Option>, @@ -816,7 +1051,16 @@ fn deser_col_specs_generic<'frame, 'result>( Ok(col_specs) } -fn _deser_col_specs_borrowed<'frame>( +/// Deserializes col specs (part of ResultMetadata or PreparedMetadata) +/// in the borrowed form. +/// +/// Checks for equality of table specs across columns, because the protocol +/// does not guarantee that and we want to be sure that the assumption +/// of them being all the same is correct. +/// +/// To avoid needless allocations, it is advised to pass `global_table_spec` +/// in the borrowed form, so that cloning it is cheap. +fn deser_col_specs_borrowed<'frame>( buf: &mut &'frame [u8], global_table_spec: Option>, col_count: usize, @@ -826,10 +1070,19 @@ fn _deser_col_specs_borrowed<'frame>( global_table_spec, col_count, ColumnSpec::borrowed, - _deser_type_borrowed, + deser_type_borrowed, ) } +/// Deserializes col specs (part of ResultMetadata or PreparedMetadata) +/// in the owned form. +/// +/// Checks for equality of table specs across columns, because the protocol +/// does not guarantee that and we want to be sure that the assumption +/// of them being all the same is correct. +/// +/// To avoid needless allocations, it is advised to pass `global_table_spec` +/// in the borrowed form, so that cloning it is cheap. 
fn deser_col_specs_owned<'frame>( buf: &mut &'frame [u8], global_table_spec: Option>, @@ -883,6 +1136,142 @@ fn deser_result_metadata( Ok((metadata, paging_state)) } +impl RawMetadataAndRawRows { + /// Deserializes flags and paging state; the other part of result metadata + /// as well as rows remain serialized. + fn deserialize( + frame: &mut FrameSlice, + cached_metadata: Option>>, + ) -> StdResult<(Self, PagingStateResponse), RowsParseError> { + let flags = types::read_int(frame.as_slice_mut()) + .map_err(|err| ResultMetadataParseError::FlagsParseError(err.into()))?; + let global_tables_spec = flags & 0x0001 != 0; + let has_more_pages = flags & 0x0002 != 0; + let no_metadata = flags & 0x0004 != 0; + + let col_count = types::read_int_length(frame.as_slice_mut()) + .map_err(ResultMetadataParseError::ColumnCountParseError)?; + + let raw_paging_state = has_more_pages + .then(|| { + types::read_bytes(frame.as_slice_mut()) + .map_err(ResultMetadataParseError::PagingStateParseError) + }) + .transpose()?; + + let paging_state = PagingStateResponse::new_from_raw_bytes(raw_paging_state); + + let raw_rows = Self { + col_count, + global_tables_spec, + no_metadata, + raw_metadata_and_rows: frame.to_bytes(), + cached_metadata, + }; + + Ok((raw_rows, paging_state)) + } +} + +impl RawMetadataAndRawRows { + // This function is needed because creating the deserializer closure + // directly in the enclosing function does not provide enough type hints + // for the compiler (and having a function with a verbose signature does), + // so it demands a type annotation. We cannot, however, write a correct + // type annotation, because this way we would limit the lifetime + // to a concrete lifetime, and our closure needs to be `impl for<'frame> ...`. + // This is a proud trick by Wojciech Przytuła, which crowns the brilliant + // idea of Karol Baryła to use Yoke to enable borrowing ResultMetadata + // from itself. + fn metadata_deserializer( + col_count: usize, + global_tables_spec: bool, + ) -> impl for<'frame> FnOnce(&mut &'frame [u8]) -> StdResult, RowsParseError> + { + move |buf| { + let server_metadata = { + let global_table_spec = global_tables_spec + .then(|| deser_table_spec(buf)) + .transpose() + .map_err(ResultMetadataParseError::from)?; + + let col_specs = deser_col_specs_borrowed(buf, global_table_spec, col_count) + .map_err(ResultMetadataParseError::from)?; + + ResultMetadata { + col_count, + col_specs, + } + }; + if server_metadata.col_count() != server_metadata.col_specs().len() { + return Err(RowsParseError::ColumnCountMismatch { + col_count: server_metadata.col_count(), + col_specs_count: server_metadata.col_specs().len(), + }); + } + Ok(server_metadata) + } + } + + /// Deserializes ResultMetadata and deserializes rows count. Keeps rows in the serialized form. + /// + /// If metadata is cached (in the PreparedStatement), it is reused (shared) from cache + /// instead of deserializing. + pub fn deserialize_metadata(self) -> StdResult { + let (metadata_deserialized, row_count_and_raw_rows) = match self.cached_metadata { + Some(cached) if self.no_metadata => { + // Server sent no metadata, but we have metadata cached. This means that we asked the server + // not to send metadata in the response as an optimization. We use cached metadata instead. + ( + ResultMetadataHolder::SharedCached(cached), + self.raw_metadata_and_rows, + ) + } + None if self.no_metadata => { + // Server sent no metadata and we have no metadata cached. 
Having no metadata cached, + // we wouldn't have asked the server for skipping metadata. Therefore, this is most probably + // not a SELECT, because in such case the server would send empty metadata both in Prepared + // and in Result responses. + ( + ResultMetadataHolder::mock_empty(), + self.raw_metadata_and_rows, + ) + } + Some(_) | None => { + // Two possibilities: + // 1) no cached_metadata provided. Server is supposed to provide the result metadata. + // 2) cached metadata present (so we should have asked for skipping metadata), + // but the server sent result metadata anyway. + // In case 1 we have to deserialize result metadata. In case 2 we choose to do that, + // too, because it's suspicious, so we had better use the new metadata just in case. + // Also, we simply need to advance the buffer pointer past metadata, and this requires + // parsing metadata. + + let (metadata_container, raw_rows_with_count) = + self_borrowed_metadata::SelfBorrowedMetadataContainer::make_deserialized_metadata( + self.raw_metadata_and_rows, + Self::metadata_deserializer(self.col_count, self.global_tables_spec), + )?; + ( + ResultMetadataHolder::SelfBorrowed(metadata_container), + raw_rows_with_count, + ) + } + }; + + let mut frame_slice = FrameSlice::new(&row_count_and_raw_rows); + + let rows_count: usize = types::read_int_length(frame_slice.as_slice_mut()) + .map_err(RowsParseError::RowsCountParseError)?; + + Ok(DeserializedMetadataAndRawRows { + metadata: metadata_deserialized, + rows_count, + raw_rows: frame_slice.to_bytes(), + }) + } +} + fn deser_prepared_metadata( buf: &mut &[u8], ) -> StdResult { @@ -1084,46 +1473,9 @@ pub fn deser_cql_value( fn deser_rows( buf_bytes: Bytes, cached_metadata: Option<&Arc>>, -) -> StdResult { - let buf = &mut &*buf_bytes; - let (server_metadata, paging_state_response) = deser_result_metadata(buf)?; - - let metadata = match cached_metadata { - Some(cached) => Arc::clone(cached), - None => { - // No cached_metadata provided. Server is supposed to provide the result metadata. - if server_metadata.col_count != server_metadata.col_specs.len() { - return Err(RowsParseError::ColumnCountMismatch { - col_count: server_metadata.col_count, - col_specs_count: server_metadata.col_specs.len(), - }); - } - Arc::new(server_metadata) - } - }; - - let original_size = buf.len(); - - let rows_count: usize = - types::read_int_length(buf).map_err(RowsParseError::RowsCountParseError)?; - - let raw_rows_iter = RowIterator::new( - rows_count, - &metadata.col_specs, - FrameSlice::new_borrowed(buf), - ); - let rows_iter = TypedRowIterator::::new(raw_rows_iter) - .map_err(|err| DeserializationError::new(err.0))?; - - let rows = rows_iter.collect::>()?; - - Ok(Rows { - metadata, - paging_state_response, - rows_count, - rows, - serialized_size: original_size - buf.len(), - }) +) -> StdResult<(RawMetadataAndRawRows, PagingStateResponse), RowsParseError> { + let mut frame_slice = FrameSlice::new(&buf_bytes); + RawMetadataAndRawRows::deserialize(&mut frame_slice, cached_metadata.cloned()) } fn deser_set_keyspace(buf: &mut &[u8]) -> StdResult { @@ -1184,6 +1536,227 @@ pub fn deserialize( ) } +// This is not #[cfg(test)], because it is used by scylla crate. +// Unfortunately, this attribute does not apply recursively to +// children item. Therefore, every `pub` item here must use have +// the specifier, too. 
+#[doc(hidden)] +mod test_utils { + use std::num::TryFromIntError; + + use bytes::{BufMut, BytesMut}; + + use super::*; + + impl TableSpec<'_> { + pub(crate) fn serialize(&self, buf: &mut impl BufMut) -> StdResult<(), TryFromIntError> { + types::write_string(&self.ks_name, buf)?; + types::write_string(&self.table_name, buf)?; + + Ok(()) + } + } + + impl ColumnType<'_> { + fn id(&self) -> u16 { + match self { + Self::Custom(_) => 0x0000, + Self::Ascii => 0x0001, + Self::BigInt => 0x0002, + Self::Blob => 0x0003, + Self::Boolean => 0x0004, + Self::Counter => 0x0005, + Self::Decimal => 0x0006, + Self::Double => 0x0007, + Self::Float => 0x0008, + Self::Int => 0x0009, + Self::Timestamp => 0x000B, + Self::Uuid => 0x000C, + Self::Text => 0x000D, + Self::Varint => 0x000E, + Self::Timeuuid => 0x000F, + Self::Inet => 0x0010, + Self::Date => 0x0011, + Self::Time => 0x0012, + Self::SmallInt => 0x0013, + Self::TinyInt => 0x0014, + Self::Duration => 0x0015, + Self::List(_) => 0x0020, + Self::Map(_, _) => 0x0021, + Self::Set(_) => 0x0022, + Self::UserDefinedType { .. } => 0x0030, + Self::Tuple(_) => 0x0031, + } + } + + // Only for use in tests + pub(crate) fn serialize(&self, buf: &mut impl BufMut) -> StdResult<(), TryFromIntError> { + let id = self.id(); + types::write_short(id, buf); + + match self { + ColumnType::Custom(type_name) => { + types::write_string(type_name, buf)?; + } + + // Simple types + ColumnType::Ascii + | ColumnType::Boolean + | ColumnType::Blob + | ColumnType::Counter + | ColumnType::Date + | ColumnType::Decimal + | ColumnType::Double + | ColumnType::Duration + | ColumnType::Float + | ColumnType::Int + | ColumnType::BigInt + | ColumnType::Text + | ColumnType::Timestamp + | ColumnType::Inet + | ColumnType::SmallInt + | ColumnType::TinyInt + | ColumnType::Time + | ColumnType::Timeuuid + | ColumnType::Uuid + | ColumnType::Varint => (), + + ColumnType::List(elem_type) | ColumnType::Set(elem_type) => { + elem_type.serialize(buf)?; + } + ColumnType::Map(key_type, value_type) => { + key_type.serialize(buf)?; + value_type.serialize(buf)?; + } + ColumnType::Tuple(types) => { + types::write_short_length(types.len(), buf)?; + for typ in types.iter() { + typ.serialize(buf)?; + } + } + ColumnType::UserDefinedType { + type_name, + keyspace, + field_types, + } => { + types::write_string(keyspace, buf)?; + types::write_string(type_name, buf)?; + types::write_short_length(field_types.len(), buf)?; + for (field_name, field_type) in field_types { + types::write_string(field_name, buf)?; + field_type.serialize(buf)?; + } + } + } + + Ok(()) + } + } + + impl<'a> ResultMetadata<'a> { + #[inline] + #[doc(hidden)] + pub fn new_for_test(col_count: usize, col_specs: Vec>) -> Self { + Self { + col_count, + col_specs, + } + } + + pub(crate) fn serialize( + &self, + buf: &mut impl BufMut, + no_metadata: bool, + global_tables_spec: bool, + ) -> StdResult<(), TryFromIntError> { + let global_table_spec = global_tables_spec + .then(|| self.col_specs.first().map(|col_spec| col_spec.table_spec())) + .flatten(); + + let mut flags = 0; + if global_table_spec.is_some() { + flags |= 0x0001; + } + if no_metadata { + flags |= 0x0004; + } + types::write_int(flags, buf); + + types::write_int_length(self.col_count, buf)?; + + // No paging state. 
+ + if !no_metadata { + if let Some(spec) = global_table_spec { + spec.serialize(buf)?; + } + + for col_spec in self.col_specs() { + if global_table_spec.is_none() { + col_spec.table_spec().serialize(buf)?; + } + + types::write_string(col_spec.name(), buf)?; + col_spec.typ().serialize(buf)?; + } + } + + Ok(()) + } + } + + impl RawMetadataAndRawRows { + #[doc(hidden)] + #[inline] + pub fn new_for_test( + cached_metadata: Option>>, + metadata: Option, + global_tables_spec: bool, + rows_count: usize, + raw_rows: &[u8], + ) -> StdResult { + let no_metadata = metadata.is_none(); + let empty_metadata = ResultMetadata::mock_empty(); + let used_metadata = metadata + .as_ref() + .or(cached_metadata.as_deref()) + .unwrap_or(&empty_metadata); + + let raw_result_rows = { + let mut buf = BytesMut::new(); + used_metadata.serialize(&mut buf, global_tables_spec, no_metadata)?; + types::write_int_length(rows_count, &mut buf)?; + buf.extend_from_slice(raw_rows); + + buf.freeze() + }; + + let (raw_rows, _paging_state_response) = + Self::deserialize(&mut FrameSlice::new(&raw_result_rows), cached_metadata).expect( + "Ill-formed serialized metadata for tests - likely bug in serialization code", + ); + + Ok(raw_rows) + } + } + + impl DeserializedMetadataAndRawRows { + #[inline] + #[doc(hidden)] + pub fn new_for_test( + metadata: ResultMetadata<'static>, + rows_count: usize, + raw_rows: Bytes, + ) -> Self { + Self { + metadata: ResultMetadataHolder::SharedCached(Arc::new(metadata)), + rows_count, + raw_rows, + } + } + } +} + #[cfg(test)] mod tests { use crate as scylla; diff --git a/scylla-cql/src/frame/types.rs b/scylla-cql/src/frame/types.rs index e73347039..70f28f6c2 100644 --- a/scylla-cql/src/frame/types.rs +++ b/scylla-cql/src/frame/types.rs @@ -173,7 +173,10 @@ pub fn read_int_length(buf: &mut &[u8]) -> Result Result<(), std::num::TryFromIntError> { +pub(crate) fn write_int_length( + v: usize, + buf: &mut impl BufMut, +) -> Result<(), std::num::TryFromIntError> { let v: i32 = v.try_into()?; write_int(v, buf); @@ -224,7 +227,10 @@ pub(crate) fn read_short_length(buf: &mut &[u8]) -> Result Result<(), std::num::TryFromIntError> { +pub(crate) fn write_short_length( + v: usize, + buf: &mut impl BufMut, +) -> Result<(), std::num::TryFromIntError> { let v: u16 = v.try_into()?; write_short(v, buf); Ok(()) diff --git a/scylla-cql/src/types/deserialize/mod.rs b/scylla-cql/src/types/deserialize/mod.rs index 8d01b7352..affc2c0fc 100644 --- a/scylla-cql/src/types/deserialize/mod.rs +++ b/scylla-cql/src/types/deserialize/mod.rs @@ -342,7 +342,7 @@ mod tests { bytes.freeze() } - pub(super) fn spec<'a>(name: &'a str, typ: ColumnType<'a>) -> ColumnSpec<'a> { + pub(super) const fn spec<'a>(name: &'a str, typ: ColumnType<'a>) -> ColumnSpec<'a> { ColumnSpec::borrowed(name, typ, TableSpec::borrowed("ks", "tbl")) } } diff --git a/scylla-cql/src/types/deserialize/result.rs b/scylla-cql/src/types/deserialize/result.rs index c8c746936..b6ad7a559 100644 --- a/scylla-cql/src/types/deserialize/result.rs +++ b/scylla-cql/src/types/deserialize/result.rs @@ -1,19 +1,23 @@ -use crate::frame::response::result::ColumnSpec; +use bytes::Bytes; + +use crate::frame::response::result::{ + ColumnSpec, DeserializedMetadataAndRawRows, ResultMetadata, ResultMetadataHolder, +}; use super::row::{mk_deser_err, BuiltinDeserializationErrorKind, ColumnIterator, DeserializeRow}; use super::{DeserializationError, FrameSlice, TypeCheckError}; use std::marker::PhantomData; -/// Iterates over the whole result, returning rows. 
+/// Iterates over the whole result, returning raw rows. #[derive(Debug)] -pub struct RowIterator<'frame, 'metadata> { +pub struct RawRowIterator<'frame, 'metadata> { specs: &'metadata [ColumnSpec<'metadata>], remaining: usize, slice: FrameSlice<'frame>, } -impl<'frame, 'metadata> RowIterator<'frame, 'metadata> { - /// Creates a new iterator over rows from a serialized response. +impl<'frame, 'metadata> RawRowIterator<'frame, 'metadata> { + /// Creates a new iterator over raw rows from a serialized response. /// /// - `remaining` - number of the remaining rows in the serialized response, /// - `specs` - information about columns of the serialized response, @@ -45,7 +49,7 @@ impl<'frame, 'metadata> RowIterator<'frame, 'metadata> { } } -impl<'frame, 'metadata> Iterator for RowIterator<'frame, 'metadata> { +impl<'frame, 'metadata> Iterator for RawRowIterator<'frame, 'metadata> { type Item = Result, DeserializationError>; #[inline] @@ -79,11 +83,11 @@ impl<'frame, 'metadata> Iterator for RowIterator<'frame, 'metadata> { } } -/// A typed version of [RowIterator] which deserializes the rows before +/// A typed version of [RawRowIterator] which deserializes the rows before /// returning them. #[derive(Debug)] pub struct TypedRowIterator<'frame, 'metadata, R> { - inner: RowIterator<'frame, 'metadata>, + inner: RawRowIterator<'frame, 'metadata>, _phantom: PhantomData, } @@ -91,11 +95,11 @@ impl<'frame, 'metadata, R> TypedRowIterator<'frame, 'metadata, R> where R: DeserializeRow<'frame, 'metadata>, { - /// Creates a new [TypedRowIterator] from given [RowIterator]. + /// Creates a new [TypedRowIterator] from given [RawRowIterator]. /// /// Calls `R::type_check` and fails if the type check fails. #[inline] - pub fn new(raw: RowIterator<'frame, 'metadata>) -> Result { + pub fn new(raw: RawRowIterator<'frame, 'metadata>) -> Result { R::type_check(raw.specs())?; Ok(Self { inner: raw, @@ -136,53 +140,227 @@ where } } +// Technically not an iterator because it returns items that borrow from it, +// and the std Iterator interface does not allow for that. +/// A _lending_ iterator over serialized rows. +/// +/// This type is similar to `RawRowIterator`, but keeps ownership of the serialized +/// result. Because it returns `ColumnIterator`s that need to borrow from it, +/// it does not implement the `Iterator` trait (there is no type in the standard +/// library to represent this concept yet). +#[derive(Debug)] +pub struct RawRowLendingIterator { + metadata: ResultMetadataHolder, + remaining: usize, + at: usize, + raw_rows: Bytes, +} + +impl RawRowLendingIterator { + /// Creates a new `RawRowLendingIterator`, consuming given `RawRows`. + #[inline] + pub fn new(raw_rows: DeserializedMetadataAndRawRows) -> Self { + let (metadata, rows_count, raw_rows) = raw_rows.into_inner(); + Self { + metadata, + remaining: rows_count, + at: 0, + raw_rows, + } + } + + /// Returns a `ColumnIterator` that represents the next row. + /// + /// Note: the `ColumnIterator` borrows from the `RawRowLendingIterator`. + /// The column iterator must be consumed before the rows iterator can + /// continue. + #[inline] + #[allow(clippy::should_implement_trait)] // https://github.com/rust-lang/rust-clippy/issues/5004 + pub fn next(&mut self) -> Option> { + self.remaining = self.remaining.checked_sub(1)?; + + // First create the slice encompassing the whole frame. + let mut remaining_frame = FrameSlice::new(&self.raw_rows); + // Then slice it to encompass the remaining suffix of the frame. 
+ *remaining_frame.as_slice_mut() = &remaining_frame.as_slice()[self.at..]; + // Ideally, we would prefer to preserve the FrameSlice between calls to `next()`, + // but borrowing from oneself is impossible, so we have to recreate it this way. + + let iter = ColumnIterator::new(self.metadata.inner().col_specs(), remaining_frame); + + // Skip the row here, manually + for (column_index, spec) in self.metadata.inner().col_specs().iter().enumerate() { + let remaining_frame_len_before_column_read = remaining_frame.as_slice().len(); + if let Err(err) = remaining_frame.read_cql_bytes() { + return Some(Err(mk_deser_err::( + BuiltinDeserializationErrorKind::RawColumnDeserializationFailed { + column_index, + column_name: spec.name().to_owned(), + err: DeserializationError::new(err), + }, + ))); + } else { + let remaining_frame_len_after_column_read = remaining_frame.as_slice().len(); + self.at += + remaining_frame_len_before_column_read - remaining_frame_len_after_column_read; + } + } + + Some(Ok(iter)) + } + + #[inline] + pub fn size_hint(&self) -> (usize, Option) { + // next() is written in a way that it does not progress on error, so once an error + // is encountered, the same error keeps being returned until `self.remaining` + // elements are yielded in total. + (self.remaining, Some(self.remaining)) + } + + /// Returns the metadata associated with the response (paging state and + /// column specifications). + #[inline] + pub fn metadata(&self) -> &ResultMetadata<'_> { + self.metadata.inner() + } + + /// Returns the remaining number of rows that this iterator is expected + /// to produce. + #[inline] + pub fn rows_remaining(&self) -> usize { + self.remaining + } +} + #[cfg(test)] mod tests { + use bytes::Bytes; + use std::ops::Deref; - use crate::frame::response::result::ColumnType; + use crate::frame::response::result::{ + ColumnSpec, ColumnType, DeserializedMetadataAndRawRows, ResultMetadata, + }; use super::super::tests::{serialize_cells, spec, CELL1, CELL2}; - use super::{FrameSlice, RowIterator, TypedRowIterator}; + use super::{ + ColumnIterator, DeserializationError, FrameSlice, RawRowIterator, RawRowLendingIterator, + TypedRowIterator, + }; + + trait LendingIterator { + type Item<'borrow> + where + Self: 'borrow; + fn lend_next(&mut self) -> Option, DeserializationError>>; + } - #[test] - fn test_row_iterator_basic_parse() { - let raw_data = serialize_cells([Some(CELL1), Some(CELL2), Some(CELL2), Some(CELL1)]); - let specs = [spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; - let mut iter = RowIterator::new(2, &specs, FrameSlice::new(&raw_data)); - - let mut row1 = iter.next().unwrap().unwrap(); - let c11 = row1.next().unwrap().unwrap(); - assert_eq!(c11.slice.unwrap().as_slice(), CELL1); - let c12 = row1.next().unwrap().unwrap(); - assert_eq!(c12.slice.unwrap().as_slice(), CELL2); - assert!(row1.next().is_none()); - - let mut row2 = iter.next().unwrap().unwrap(); - let c21 = row2.next().unwrap().unwrap(); - assert_eq!(c21.slice.unwrap().as_slice(), CELL2); - let c22 = row2.next().unwrap().unwrap(); - assert_eq!(c22.slice.unwrap().as_slice(), CELL1); - assert!(row2.next().is_none()); + impl<'frame, 'metadata> LendingIterator for RawRowIterator<'frame, 'metadata> { + type Item<'borrow> = ColumnIterator<'borrow, 'borrow> + where + Self: 'borrow; - assert!(iter.next().is_none()); + fn lend_next(&mut self) -> Option, DeserializationError>> { + self.next() + } + } + + impl LendingIterator for RawRowLendingIterator { + type Item<'borrow> = ColumnIterator<'borrow, 'borrow>; + + fn 
lend_next(&mut self) -> Option, DeserializationError>> { + self.next() + } } #[test] - fn test_row_iterator_too_few_rows() { - let raw_data = serialize_cells([Some(CELL1), Some(CELL2)]); - let specs = [spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; - let mut iter = RowIterator::new(2, &specs, FrameSlice::new(&raw_data)); + fn test_row_iterators_basic_parse() { + // Those statics are required because of a compiler bug-limitation about GATs: + // https://blog.rust-lang.org/2022/10/28/gats-stabilization.html#implied-static-requirement-from-higher-ranked-trait-bounds + // the following type higher-ranked lifetime constraint implies 'static lifetime. + // + // I: for<'item> LendingIterator = ColumnIterator<'item>>, + // + // The bug is said to be a lot of effort to fix, so in tests let's just use `lazy_static` + // to obtain 'static references. + // + // `std::sync::LazyLock` is stable since 1.80, so once we bump MSRV enough, + // we will be able to replace `lazy_static` with `LazyLock`. + + static SPECS: &[ColumnSpec<'static>] = + &[spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; + lazy_static::lazy_static! { + static ref RAW_DATA: Bytes = serialize_cells([Some(CELL1), Some(CELL2), Some(CELL2), Some(CELL1)]); + } + let raw_data = RAW_DATA.deref(); + let specs = SPECS; + + let row_iter = RawRowIterator::new(2, specs, FrameSlice::new(raw_data)); + let lending_row_iter = + RawRowLendingIterator::new(DeserializedMetadataAndRawRows::new_for_test( + ResultMetadata::new_for_test(specs.len(), specs.to_vec()), + 2, + raw_data.clone(), + )); + check(row_iter); + check(lending_row_iter); + + fn check(mut iter: I) + where + I: for<'item> LendingIterator = ColumnIterator<'item, 'item>>, + { + let mut row1 = iter.lend_next().unwrap().unwrap(); + let c11 = row1.next().unwrap().unwrap(); + assert_eq!(c11.slice.unwrap().as_slice(), CELL1); + let c12 = row1.next().unwrap().unwrap(); + assert_eq!(c12.slice.unwrap().as_slice(), CELL2); + assert!(row1.next().is_none()); + + let mut row2 = iter.lend_next().unwrap().unwrap(); + let c21 = row2.next().unwrap().unwrap(); + assert_eq!(c21.slice.unwrap().as_slice(), CELL2); + let c22 = row2.next().unwrap().unwrap(); + assert_eq!(c22.slice.unwrap().as_slice(), CELL1); + assert!(row2.next().is_none()); + + assert!(iter.lend_next().is_none()); + } + } - iter.next().unwrap().unwrap(); - assert!(iter.next().unwrap().is_err()); + #[test] + fn test_row_iterators_too_few_rows() { + static SPECS: &[ColumnSpec<'static>] = + &[spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; + lazy_static::lazy_static! 
{ + static ref RAW_DATA: Bytes = serialize_cells([Some(CELL1), Some(CELL2)]); + } + let raw_data = RAW_DATA.deref(); + let specs = SPECS; + + let row_iter = RawRowIterator::new(2, specs, FrameSlice::new(raw_data)); + let lending_row_iter = + RawRowLendingIterator::new(DeserializedMetadataAndRawRows::new_for_test( + ResultMetadata::new_for_test(specs.len(), specs.to_vec()), + 2, + raw_data.clone(), + )); + check(row_iter); + check(lending_row_iter); + + fn check(mut iter: I) + where + I: for<'item> LendingIterator = ColumnIterator<'item, 'item>>, + { + iter.lend_next().unwrap().unwrap(); + iter.lend_next().unwrap().unwrap_err(); + } } #[test] fn test_typed_row_iterator_basic_parse() { let raw_data = serialize_cells([Some(CELL1), Some(CELL2), Some(CELL2), Some(CELL1)]); let specs = [spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; - let iter = RowIterator::new(2, &specs, FrameSlice::new(&raw_data)); + let iter = RawRowIterator::new(2, &specs, FrameSlice::new(&raw_data)); let mut iter = TypedRowIterator::<'_, '_, (&[u8], Vec)>::new(iter).unwrap(); let (c11, c12) = iter.next().unwrap().unwrap(); @@ -200,7 +378,7 @@ mod tests { fn test_typed_row_iterator_wrong_type() { let raw_data = Bytes::new(); let specs = [spec("b1", ColumnType::Blob), spec("b2", ColumnType::Blob)]; - let iter = RowIterator::new(0, &specs, FrameSlice::new(&raw_data)); + let iter = RawRowIterator::new(0, &specs, FrameSlice::new(&raw_data)); assert!(TypedRowIterator::<'_, '_, (i32, i64)>::new(iter).is_err()); } } diff --git a/scylla/src/lib.rs b/scylla/src/lib.rs index 92e256259..aaa1506bd 100644 --- a/scylla/src/lib.rs +++ b/scylla/src/lib.rs @@ -209,7 +209,7 @@ pub mod deserialize { /// Deserializing the whole query result contents. pub mod result { - pub use scylla_cql::types::deserialize::result::{RowIterator, TypedRowIterator}; + pub use scylla_cql::types::deserialize::result::TypedRowIterator; } /// Deserializing a row of the query result. 
@@ -258,7 +258,8 @@ pub use frame::response::cql_to_rust::FromRow; pub use transport::caching_session::CachingSession; pub use transport::execution_profile::ExecutionProfile; -pub use transport::query_result::QueryResult; +pub use transport::legacy_query_result::LegacyQueryResult; +pub use transport::query_result::{QueryResult, QueryRowsResult}; pub use transport::session::{IntoTypedRows, Session, SessionConfig}; pub use transport::session_builder::SessionBuilder; diff --git a/scylla/src/transport/caching_session.rs b/scylla/src/transport/caching_session.rs index e4bca85bd..cbf9d3c6d 100644 --- a/scylla/src/transport/caching_session.rs +++ b/scylla/src/transport/caching_session.rs @@ -3,9 +3,9 @@ use crate::prepared_statement::PreparedStatement; use crate::query::Query; use crate::statement::{PagingState, PagingStateResponse}; use crate::transport::errors::QueryError; -use crate::transport::iterator::RowIterator; +use crate::transport::iterator::LegacyRowIterator; use crate::transport::partitioner::PartitionerName; -use crate::{QueryResult, Session}; +use crate::{LegacyQueryResult, Session}; use bytes::Bytes; use dashmap::DashMap; use futures::future::try_join_all; @@ -75,7 +75,7 @@ where &self, query: impl Into, values: impl SerializeRow, - ) -> Result { + ) -> Result { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session.execute_unpaged(&prepared, values).await @@ -86,7 +86,7 @@ where &self, query: impl Into, values: impl SerializeRow, - ) -> Result { + ) -> Result { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session.execute_iter(prepared, values).await @@ -98,7 +98,7 @@ where query: impl Into, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { let query = query.into(); let prepared = self.add_prepared_statement_owned(query).await?; self.session @@ -112,7 +112,7 @@ where &self, batch: &Batch, values: impl BatchValues, - ) -> Result { + ) -> Result { let all_prepared: bool = batch .statements .iter() diff --git a/scylla/src/transport/connection.rs b/scylla/src/transport/connection.rs index 942e47698..188984393 100644 --- a/scylla/src/transport/connection.rs +++ b/scylla/src/transport/connection.rs @@ -47,8 +47,9 @@ use std::{ }; use super::errors::{ProtocolError, UseKeyspaceProtocolError}; -use super::iterator::RowIterator; +use super::iterator::{LegacyRowIterator, QueryPager}; use super::locator::tablets::{RawTablet, TabletParsingError}; +use super::query_result::QueryResult; use super::session::AddressTranslator; use super::topology::{PeerEndpoint, UntranslatedEndpoint, UntranslatedPeer}; use super::NodeAddr; @@ -69,7 +70,6 @@ use crate::routing::ShardInfo; use crate::statement::prepared_statement::PreparedStatement; use crate::statement::{Consistency, PageSize, PagingState, PagingStateResponse}; use crate::transport::Compression; -use crate::QueryResult; // Queries for schema agreement const LOCAL_VERSION: &str = "SELECT schema_version FROM system.local WHERE key='local'"; @@ -268,14 +268,11 @@ impl NonErrorQueryResponse { pub(crate) fn into_query_result_and_paging_state( self, ) -> Result<(QueryResult, PagingStateResponse), UserRequestError> { - let (rows, paging_state, metadata, serialized_size) = match self.response { - NonErrorResponse::Result(result::Result::Rows(rs)) => ( - Some(rs.rows), - rs.paging_state_response, - Some(rs.metadata), - 
rs.serialized_size, - ), - NonErrorResponse::Result(_) => (None, PagingStateResponse::NoMorePages, None, 0), + let (raw_rows, paging_state_response) = match self.response { + NonErrorResponse::Result(result::Result::Rows((rs, paging_state_response))) => { + (Some(rs), paging_state_response) + } + NonErrorResponse::Result(_) => (None, PagingStateResponse::NoMorePages), _ => { return Err(UserRequestError::UnexpectedResponse( self.response.to_response_kind(), @@ -284,14 +281,8 @@ impl NonErrorQueryResponse { }; Ok(( - QueryResult { - rows, - warnings: self.warnings, - tracing_id: self.tracing_id, - metadata, - serialized_size, - }, - paging_state, + QueryResult::new(raw_rows, self.tracing_id, self.warnings), + paging_state_response, )) } @@ -1191,14 +1182,15 @@ impl Connection { pub(crate) async fn query_iter( self: Arc, query: Query, - ) -> Result { + ) -> Result { let consistency = query .config .determine_consistency(self.config.default_consistency); let serial_consistency = query.config.serial_consistency.flatten(); - RowIterator::new_for_connection_query_iter(query, self, consistency, serial_consistency) + QueryPager::new_for_connection_query_iter(query, self, consistency, serial_consistency) .await + .map(QueryPager::into_legacy) } /// Executes a prepared statements and fetches its results over multiple pages, using @@ -1207,13 +1199,13 @@ impl Connection { self: Arc, prepared_statement: PreparedStatement, values: SerializedValues, - ) -> Result { + ) -> Result { let consistency = prepared_statement .config .determine_consistency(self.config.default_consistency); let serial_consistency = prepared_statement.config.serial_consistency.flatten(); - RowIterator::new_for_connection_execute_iter( + QueryPager::new_for_connection_execute_iter( prepared_statement, values, self, @@ -1221,6 +1213,7 @@ impl Connection { serial_consistency, ) .await + .map(QueryPager::into_legacy) } #[allow(dead_code)] @@ -1443,6 +1436,7 @@ impl Connection { let (version_id,) = self .query_unpaged(LOCAL_VERSION) .await? + .into_legacy_result()? .single_row_typed() .map_err(ProtocolError::SchemaVersionFetch)?; Ok(version_id) @@ -2615,6 +2609,8 @@ mod tests { .query_unpaged("SELECT p, v FROM t") .await .unwrap() + .into_legacy_result() + .unwrap() .rows_typed::<(i32, Vec)>() .unwrap() .collect::, _>>() diff --git a/scylla/src/transport/errors.rs b/scylla/src/transport/errors.rs index aee6ab1f3..d95383054 100644 --- a/scylla/src/transport/errors.rs +++ b/scylla/src/transport/errors.rs @@ -32,7 +32,7 @@ use thiserror::Error; use crate::{authentication::AuthError, frame::response}; -use super::query_result::{RowsExpectedError, SingleRowTypedError}; +use super::legacy_query_result::{RowsExpectedError, SingleRowTypedError}; /// Error that occurred during query execution #[derive(Error, Debug, Clone)] diff --git a/scylla/src/transport/iterator.rs b/scylla/src/transport/iterator.rs index 7ae248d80..1ea00c457 100644 --- a/scylla/src/transport/iterator.rs +++ b/scylla/src/transport/iterator.rs @@ -1,7 +1,6 @@ //! 
Iterators over rows returned by paged queries use std::future::Future; -use std::mem; use std::net::SocketAddr; use std::ops::ControlFlow; use std::pin::Pin; @@ -9,19 +8,25 @@ use std::sync::Arc; use std::task::{Context, Poll}; use futures::Stream; +use scylla_cql::frame::frame_errors::RowsParseError; +use scylla_cql::frame::response::result::RawMetadataAndRawRows; use scylla_cql::frame::response::NonErrorResponse; +use scylla_cql::types::deserialize::result::RawRowLendingIterator; +use scylla_cql::types::deserialize::row::{ColumnIterator, DeserializeRow}; +use scylla_cql::types::deserialize::TypeCheckError; use scylla_cql::types::serialize::row::SerializedValues; use std::result::Result; use thiserror::Error; use tokio::sync::mpsc; use super::execution_profile::ExecutionProfileInner; +use super::query_result::ColumnSpecs; use super::session::RequestSpan; use crate::cql_to_rust::{FromRow, FromRowError}; use crate::frame::response::{ result, - result::{ColumnSpec, Row, Rows}, + result::{ColumnSpec, Row}, }; use crate::history::{self, HistoryListener}; use crate::statement::{prepared_statement::PreparedStatement, query::Query}; @@ -36,17 +41,22 @@ use crate::transport::NodeRef; use tracing::{trace, trace_span, warn, Instrument}; use uuid::Uuid; -/// Iterator over rows returned by paged queries\ -/// Allows to easily access rows without worrying about handling multiple pages -pub struct RowIterator { - current_row_idx: usize, - current_page: Rows, - page_receiver: mpsc::Receiver>, - tracing_ids: Vec, +// Like std::task::ready!, but handles the whole stack of Poll>>. +// If it matches Poll::Ready(Some(Ok(_))), then it returns the innermost value, +// otherwise it returns from the surrounding function. +macro_rules! ready_some_ok { + ($e:expr) => { + match $e { + Poll::Ready(Some(Ok(x))) => x, + Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err.into()))), + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + }; } struct ReceivedPage { - rows: Rows, + rows: RawMetadataAndRawRows, tracing_id: Option, } @@ -58,892 +68,1128 @@ pub(crate) struct PreparedIteratorConfig { pub(crate) metrics: Arc, } -/// Fetching pages is asynchronous so `RowIterator` does not implement the `Iterator` trait.\ -/// Instead it uses the asynchronous `Stream` trait -impl Stream for RowIterator { - type Item = Result; +// A separate module is used here so that the parent module cannot construct +// SendAttemptedProof directly. +mod checked_channel_sender { + use scylla_cql::frame::response::result::RawMetadataAndRawRows; + use std::marker::PhantomData; + use tokio::sync::mpsc; + use uuid::Uuid; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut s = self.as_mut(); + use crate::transport::errors::QueryError; - if s.is_current_page_exhausted() { - match Pin::new(&mut s.page_receiver).poll_recv(cx) { - Poll::Ready(Some(Ok(received_page))) => { - s.current_page = received_page.rows; - s.current_row_idx = 0; + use super::ReceivedPage; - if let Some(tracing_id) = received_page.tracing_id { - s.tracing_ids.push(tracing_id); - } - } - Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))), - Poll::Ready(None) => return Poll::Ready(None), - Poll::Pending => return Poll::Pending, - } - } + /// A value whose existence proves that there was an attempt + /// to send an item of type T through a channel. + /// Can only be constructed by ProvingSender::send. 
+ pub(crate) struct SendAttemptedProof(PhantomData); - let idx = s.current_row_idx; - if idx < s.current_page.rows.len() { - let row = mem::take(&mut s.current_page.rows[idx]); - s.current_row_idx += 1; - return Poll::Ready(Some(Ok(row))); - } + /// An mpsc::Sender which returns proofs that it attempted to send items. + pub(crate) struct ProvingSender(mpsc::Sender); - // We probably got a zero-sized page - // Yield, but tell that we are ready - cx.waker().wake_by_ref(); - Poll::Pending + impl From> for ProvingSender { + fn from(s: mpsc::Sender) -> Self { + Self(s) + } } -} -impl RowIterator { - /// Converts this iterator into an iterator over rows parsed as given type - pub fn into_typed(self) -> TypedRowIterator { - TypedRowIterator { - row_iterator: self, - phantom_data: Default::default(), + impl ProvingSender { + pub(crate) async fn send( + &self, + value: T, + ) -> (SendAttemptedProof, Result<(), mpsc::error::SendError>) { + (SendAttemptedProof(PhantomData), self.0.send(value).await) } } - pub(crate) async fn new_for_query( - query: Query, - execution_profile: Arc, - cluster_data: Arc, - metrics: Arc, - ) -> Result { - let (sender, receiver) = mpsc::channel(1); - - let consistency = query - .config - .consistency - .unwrap_or(execution_profile.consistency); - let serial_consistency = query - .config - .serial_consistency - .unwrap_or(execution_profile.serial_consistency); - - let page_size = query.get_validated_page_size(); - - let routing_info = RoutingInfo { - consistency, - serial_consistency, - ..Default::default() - }; - - let retry_session = query - .get_retry_policy() - .map(|rp| &**rp) - .unwrap_or(&*execution_profile.retry_policy) - .new_session(); - - let parent_span = tracing::Span::current(); - let worker_task = async move { - let query_ref = &query; + type ResultPage = Result; - let page_query = |connection: Arc, - consistency: Consistency, - paging_state: PagingState| { - async move { - connection - .query_raw_with_consistency( - query_ref, - consistency, - serial_consistency, - Some(page_size), - paging_state, - ) - .await - } + impl ProvingSender { + pub(crate) async fn send_empty_page( + &self, + tracing_id: Option, + ) -> ( + SendAttemptedProof, + Result<(), mpsc::error::SendError>, + ) { + let empty_page = ReceivedPage { + rows: RawMetadataAndRawRows::mock_empty(), + tracing_id, }; + self.send(Ok(empty_page)).await + } + } +} - let query_ref = &query; - - let span_creator = move || { - let span = RequestSpan::new_query(&query_ref.contents); - span.record_request_size(0); - span - }; +use checked_channel_sender::{ProvingSender, SendAttemptedProof}; - let worker = RowIteratorWorker { - sender: sender.into(), - page_query, - statement_info: routing_info, - query_is_idempotent: query.config.is_idempotent, - query_consistency: consistency, - retry_session, - execution_profile, - metrics, - paging_state: PagingState::start(), - history_listener: query.config.history_listener.clone(), - current_query_id: None, - current_attempt_id: None, - parent_span, - span_creator, - }; +type PageSendAttemptedProof = SendAttemptedProof>; - worker.work(cluster_data).await - }; +// RowIteratorWorker works in the background to fetch pages +// RowIterator receives them through a channel +struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { + sender: ProvingSender>, - Self::new_from_worker_future(worker_task, receiver).await - } + // Closure used to perform a single page query + // AsyncFn(Arc, Option>) -> Result + page_query: QueryFunc, - pub(crate) async fn 
new_for_prepared_statement( - config: PreparedIteratorConfig, - ) -> Result { - let (sender, receiver) = mpsc::channel(1); + statement_info: RoutingInfo<'a>, + query_is_idempotent: bool, + query_consistency: Consistency, + retry_session: Box, + execution_profile: Arc, + metrics: Arc, - let consistency = config - .prepared - .config - .consistency - .unwrap_or(config.execution_profile.consistency); - let serial_consistency = config - .prepared - .config - .serial_consistency - .unwrap_or(config.execution_profile.serial_consistency); + paging_state: PagingState, - let page_size = config.prepared.get_validated_page_size(); + history_listener: Option>, + current_query_id: Option, + current_attempt_id: Option, - let retry_session = config - .prepared - .get_retry_policy() - .map(|rp| &**rp) - .unwrap_or(&*config.execution_profile.retry_policy) - .new_session(); + parent_span: tracing::Span, + span_creator: SpanCreatorFunc, +} - let parent_span = tracing::Span::current(); - let worker_task = async move { - let prepared_ref = &config.prepared; - let values_ref = &config.values; +impl RowIteratorWorker<'_, QueryFunc, SpanCreator> +where + QueryFunc: Fn(Arc, Consistency, PagingState) -> QueryFut, + QueryFut: Future>, + SpanCreator: Fn() -> RequestSpan, +{ + // Contract: this function MUST send at least one item through self.sender + async fn work(mut self, cluster_data: Arc) -> PageSendAttemptedProof { + let load_balancer = self.execution_profile.load_balancing_policy.clone(); + let statement_info = self.statement_info.clone(); + let query_plan = + load_balancing::Plan::new(load_balancer.as_ref(), &statement_info, &cluster_data); - let (partition_key, token) = match prepared_ref - .extract_partition_key_and_calculate_token( - prepared_ref.get_partitioner_name(), - values_ref, - ) { - Ok(res) => res.unzip(), - Err(err) => { - let (proof, _res) = ProvingSender::from(sender).send(Err(err)).await; - return proof; - } - }; + let mut last_error: QueryError = QueryError::EmptyPlan; + let mut current_consistency: Consistency = self.query_consistency; - let table_spec = config.prepared.get_table_spec(); - let statement_info = RoutingInfo { - consistency, - serial_consistency, - token, - table: table_spec, - is_confirmed_lwt: config.prepared.is_confirmed_lwt(), - }; + self.log_query_start(); - let page_query = |connection: Arc, - consistency: Consistency, - paging_state: PagingState| async move { - connection - .execute_raw_with_consistency( - prepared_ref, - values_ref, - consistency, - serial_consistency, - Some(page_size), - paging_state, - ) - .await + 'nodes_in_plan: for (node, shard) in query_plan { + let span = + trace_span!(parent: &self.parent_span, "Executing query", node = %node.address); + // For each node in the plan choose a connection to use + // This connection will be reused for same node retries to preserve paging cache on the shard + let connection: Arc = match node + .connection_for_shard(shard) + .instrument(span.clone()) + .await + { + Ok(connection) => connection, + Err(e) => { + trace!( + parent: &span, + error = %e, + "Choosing connection failed" + ); + last_error = e.into(); + // Broken connection doesn't count as a failed query, don't log in metrics + continue 'nodes_in_plan; + } }; - let serialized_values_size = config.values.buffer_size(); + 'same_node_retries: loop { + trace!(parent: &span, "Execution started"); + // Query pages until an error occurs + let queries_result: Result = self + .query_pages(&connection, current_consistency, node) + .instrument(span.clone()) + .await; - 
let replicas: Option> = - if let (Some(table_spec), Some(token)) = - (statement_info.table, statement_info.token) - { - Some( - config - .cluster_data - .get_token_endpoints_iter(table_spec, token) - .map(|(node, shard)| (node.clone(), shard)) - .collect(), - ) - } else { - None + last_error = match queries_result { + Ok(proof) => { + trace!(parent: &span, "Query succeeded"); + // query_pages returned Ok, so we are guaranteed + // that it attempted to send at least one page + // through self.sender and we can safely return now. + return proof; + } + Err(error) => { + trace!( + parent: &span, + error = %error, + "Query failed" + ); + error + } }; - let span_creator = move || { - let span = RequestSpan::new_prepared( - partition_key.as_ref().map(|pk| pk.iter()), - token, - serialized_values_size, - ); - if let Some(replicas) = replicas.as_ref() { - span.record_replicas(replicas); - } - span - }; - - let worker = RowIteratorWorker { - sender: sender.into(), - page_query, - statement_info, - query_is_idempotent: config.prepared.config.is_idempotent, - query_consistency: consistency, - retry_session, - execution_profile: config.execution_profile, - metrics: config.metrics, - paging_state: PagingState::start(), - history_listener: config.prepared.config.history_listener.clone(), - current_query_id: None, - current_attempt_id: None, - parent_span, - span_creator, - }; + // Use retry policy to decide what to do next + let query_info = QueryInfo { + error: &last_error, + is_idempotent: self.query_is_idempotent, + consistency: self.query_consistency, + }; - worker.work(config.cluster_data).await - }; + let retry_decision = self.retry_session.decide_should_retry(query_info); + trace!( + parent: &span, + retry_decision = format!("{:?}", retry_decision).as_str() + ); + self.log_attempt_error(&last_error, &retry_decision); + match retry_decision { + RetryDecision::RetrySameNode(cl) => { + self.metrics.inc_retries_num(); + current_consistency = cl.unwrap_or(current_consistency); + continue 'same_node_retries; + } + RetryDecision::RetryNextNode(cl) => { + self.metrics.inc_retries_num(); + current_consistency = cl.unwrap_or(current_consistency); + continue 'nodes_in_plan; + } + RetryDecision::DontRetry => break 'nodes_in_plan, + RetryDecision::IgnoreWriteError => { + warn!("Ignoring error during fetching pages; stopping fetching."); + // If we are here then, most likely, we didn't send + // anything through the self.sender channel. + // Although we are in an awkward situation (_iter + // interface isn't meant for sending writes), + // we must attempt to send something because + // the iterator expects it. + let (proof, _) = self.sender.send_empty_page(None).await; + return proof; + } + }; + } + } - Self::new_from_worker_future(worker_task, receiver).await + // Send last_error to RowIterator - query failed fully + self.log_query_error(&last_error); + let (proof, _) = self.sender.send(Err(last_error)).await; + proof } - pub(crate) async fn new_for_connection_query_iter( - query: Query, - connection: Arc, + // Given a working connection query as many pages as possible until the first error. 
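The retry flow in `work` is driven purely by labeled loops: the policy's `RetryDecision` picks which label to `continue` or `break`. A reduced sketch with a stand-in `RetryDecision` enum (not the driver's type):

```rust
enum RetryDecision {
    RetrySameNode,
    RetryNextNode,
    DontRetry,
}

// Illustrative policy: retry once on the same node, then move on.
fn decide(attempt: u32) -> RetryDecision {
    if attempt == 0 {
        RetryDecision::RetrySameNode
    } else {
        RetryDecision::RetryNextNode
    }
}

fn try_query(node: &str, attempt: u32) -> Result<String, String> {
    // Always fail, to exercise every retry path.
    Err(format!("{node}: attempt {attempt} failed"))
}

fn run(nodes: &[&str]) -> Result<String, String> {
    let mut last_error = String::from("empty plan");

    'nodes_in_plan: for node in nodes {
        let mut attempt = 0;
        'same_node_retries: loop {
            match try_query(node, attempt) {
                Ok(page) => return Ok(page),
                Err(e) => last_error = e,
            }
            match decide(attempt) {
                RetryDecision::RetrySameNode => {
                    attempt += 1;
                    continue 'same_node_retries;
                }
                RetryDecision::RetryNextNode => continue 'nodes_in_plan,
                RetryDecision::DontRetry => break 'nodes_in_plan,
            }
        }
    }
    Err(last_error)
}

fn main() {
    assert!(run(&["a", "b"]).is_err());
}
```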
+ // + // Contract: this function must either: + // - Return an error + // - Return Ok but have attempted to send a page via self.sender + async fn query_pages( + &mut self, + connection: &Arc, consistency: Consistency, - serial_consistency: Option, - ) -> Result { - let (sender, receiver) = mpsc::channel::>(1); - - let page_size = query.get_validated_page_size(); - - let worker_task = async move { - let worker = SingleConnectionRowIteratorWorker { - sender: sender.into(), - fetcher: |paging_state| { - connection.query_raw_with_consistency( - &query, - consistency, - serial_consistency, - Some(page_size), - paging_state, - ) - }, - }; - worker.work().await - }; - - Self::new_from_worker_future(worker_task, receiver).await + node: NodeRef<'_>, + ) -> Result { + loop { + let request_span = (self.span_creator)(); + match self + .query_one_page(connection, consistency, node, &request_span) + .instrument(request_span.span().clone()) + .await? + { + ControlFlow::Break(proof) => return Ok(proof), + ControlFlow::Continue(_) => {} + } + } } - pub(crate) async fn new_for_connection_execute_iter( - prepared: PreparedStatement, - values: SerializedValues, - connection: Arc, + async fn query_one_page( + &mut self, + connection: &Arc, consistency: Consistency, - serial_consistency: Option, - ) -> Result { - let (sender, receiver) = mpsc::channel::>(1); - - let page_size = prepared.get_validated_page_size(); - - let worker_task = async move { - let worker = SingleConnectionRowIteratorWorker { - sender: sender.into(), - fetcher: |paging_state| { - connection.execute_raw_with_consistency( - &prepared, - &values, - consistency, - serial_consistency, - Some(page_size), - paging_state, - ) - }, - }; - worker.work().await - }; - - Self::new_from_worker_future(worker_task, receiver).await - } - - async fn new_from_worker_future( - worker_task: impl Future + Send + 'static, - mut receiver: mpsc::Receiver>, - ) -> Result { - tokio::task::spawn(worker_task); + node: NodeRef<'_>, + request_span: &RequestSpan, + ) -> Result, QueryError> { + self.metrics.inc_total_paged_queries(); + let query_start = std::time::Instant::now(); - // This unwrap is safe because: - // - The future returned by worker.work sends at least one item - // to the channel (the PageSendAttemptedProof helps enforce this) - // - That future is polled in a tokio::task which isn't going to be - // cancelled - let pages_received = receiver.recv().await.unwrap()?; + trace!( + connection = %connection.get_connect_address(), + "Sending" + ); + self.log_attempt_start(connection.get_connect_address()); - Ok(RowIterator { - current_row_idx: 0, - current_page: pages_received.rows, - page_receiver: receiver, - tracing_ids: if let Some(tracing_id) = pages_received.tracing_id { - vec![tracing_id] - } else { - Vec::new() - }, - }) - } + let query_response = + (self.page_query)(connection.clone(), consistency, self.paging_state.clone()) + .await + .and_then(QueryResponse::into_non_error_query_response); - /// If tracing was enabled returns tracing ids of all finished page queries - pub fn get_tracing_ids(&self) -> &[Uuid] { - &self.tracing_ids - } + let elapsed = query_start.elapsed(); - /// Returns specification of row columns - pub fn get_column_specs(&self) -> &[ColumnSpec<'static>] { - self.current_page.metadata.col_specs() - } + request_span.record_shard_id(connection); - fn is_current_page_exhausted(&self) -> bool { - self.current_row_idx >= self.current_page.rows.len() - } -} + match query_response { + Ok(NonErrorQueryResponse { + response: + 
NonErrorResponse::Result(result::Result::Rows((rows, paging_state_response))), + tracing_id, + .. + }) => { + let _ = self.metrics.log_query_latency(elapsed.as_millis() as u64); + self.log_attempt_success(); + self.log_query_success(); + self.execution_profile + .load_balancing_policy + .on_query_success(&self.statement_info, elapsed, node); -// A separate module is used here so that the parent module cannot construct -// SendAttemptedProof directly. -mod checked_channel_sender { - use scylla_cql::frame::{ - request::query::PagingStateResponse, - response::result::{ResultMetadata, Rows}, - }; - use std::{marker::PhantomData, sync::Arc}; - use tokio::sync::mpsc; - use uuid::Uuid; + request_span.record_raw_rows_fields(&rows); - use crate::transport::errors::QueryError; + let received_page = ReceivedPage { rows, tracing_id }; - use super::ReceivedPage; + // Send next page to RowIterator + let (proof, res) = self.sender.send(Ok(received_page)).await; + if res.is_err() { + // channel was closed, RowIterator was dropped - should shutdown + return Ok(ControlFlow::Break(proof)); + } - /// A value whose existence proves that there was an attempt - /// to send an item of type T through a channel. - /// Can only be constructed by ProvingSender::send. - pub(crate) struct SendAttemptedProof(PhantomData); + match paging_state_response.into_paging_control_flow() { + ControlFlow::Continue(paging_state) => { + self.paging_state = paging_state; + } + ControlFlow::Break(()) => { + // Reached the last query, shutdown + return Ok(ControlFlow::Break(proof)); + } + } - /// An mpsc::Sender which returns proofs that it attempted to send items. - pub(crate) struct ProvingSender(mpsc::Sender); + // Query succeeded, reset retry policy for future retries + self.retry_session.reset(); + self.log_query_start(); - impl From> for ProvingSender { - fn from(s: mpsc::Sender) -> Self { - Self(s) + Ok(ControlFlow::Continue(())) + } + Err(err) => { + let err = err.into(); + self.metrics.inc_failed_paged_queries(); + self.execution_profile + .load_balancing_policy + .on_query_failure(&self.statement_info, elapsed, node, &err); + Err(err) + } + Ok(NonErrorQueryResponse { + response: NonErrorResponse::Result(_), + tracing_id, + .. + }) => { + // We have most probably sent a modification statement (e.g. INSERT or UPDATE), + // so let's return an empty iterator as suggested in #631. + + // We must attempt to send something because the iterator expects it. 
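Page advancement in `query_one_page` hinges on `PagingStateResponse::into_paging_control_flow()`, which maps "has more pages" onto `std::ops::ControlFlow`. A toy loop with a stand-in cursor function shows the shape:

```rust
use std::ops::ControlFlow;

// Stand-in for into_paging_control_flow(): Continue carries the cursor
// for the next page, Break means the last page has been read.
fn next_cursor(cursor: u32) -> ControlFlow<(), u32> {
    if cursor < 3 {
        ControlFlow::Continue(cursor + 1)
    } else {
        ControlFlow::Break(())
    }
}

fn main() {
    let mut cursor = 0;
    let mut pages = Vec::new();
    loop {
        pages.push(format!("page at cursor {cursor}"));
        match next_cursor(cursor) {
            ControlFlow::Continue(next) => cursor = next,
            ControlFlow::Break(()) => break,
        }
    }
    assert_eq!(pages.len(), 4);
}
```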
+ let (proof, _) = self.sender.send_empty_page(tracing_id).await; + Ok(ControlFlow::Break(proof)) + } + Ok(response) => { + self.metrics.inc_failed_paged_queries(); + let err = + ProtocolError::UnexpectedResponse(response.response.to_response_kind()).into(); + self.execution_profile + .load_balancing_policy + .on_query_failure(&self.statement_info, elapsed, node, &err); + Err(err) + } } } - impl ProvingSender { - pub(crate) async fn send( - &self, - value: T, - ) -> (SendAttemptedProof, Result<(), mpsc::error::SendError>) { - (SendAttemptedProof(PhantomData), self.0.send(value).await) - } + fn log_query_start(&mut self) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; + + self.current_query_id = Some(history_listener.log_query_start()); } - type ResultPage = Result; + fn log_query_success(&mut self) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; - impl ProvingSender { - pub(crate) async fn send_empty_page( - &self, - tracing_id: Option, - ) -> ( - SendAttemptedProof, - Result<(), mpsc::error::SendError>, - ) { - let empty_page = ReceivedPage { - rows: Rows { - metadata: Arc::new(ResultMetadata::mock_empty()), - paging_state_response: PagingStateResponse::NoMorePages, - rows_count: 0, - rows: Vec::new(), - serialized_size: 0, - }, - tracing_id, - }; - self.send(Ok(empty_page)).await - } + let query_id: history::QueryId = match &self.current_query_id { + Some(id) => *id, + None => return, + }; + + history_listener.log_query_success(query_id); } -} -use checked_channel_sender::{ProvingSender, SendAttemptedProof}; + fn log_query_error(&mut self, error: &QueryError) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; -type PageSendAttemptedProof = SendAttemptedProof>; + let query_id: history::QueryId = match &self.current_query_id { + Some(id) => *id, + None => return, + }; -// RowIteratorWorker works in the background to fetch pages -// RowIterator receives them through a channel -struct RowIteratorWorker<'a, QueryFunc, SpanCreatorFunc> { - sender: ProvingSender>, + history_listener.log_query_error(query_id, error); + } - // Closure used to perform a single page query - // AsyncFn(Arc, Option>) -> Result - page_query: QueryFunc, + fn log_attempt_start(&mut self, node_addr: SocketAddr) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; - statement_info: RoutingInfo<'a>, - query_is_idempotent: bool, - query_consistency: Consistency, - retry_session: Box, - execution_profile: Arc, - metrics: Arc, + let query_id: history::QueryId = match &self.current_query_id { + Some(id) => *id, + None => return, + }; - paging_state: PagingState, + self.current_attempt_id = + Some(history_listener.log_attempt_start(query_id, None, node_addr)); + } - history_listener: Option>, - current_query_id: Option, - current_attempt_id: Option, + fn log_attempt_success(&mut self) { + let history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; - parent_span: tracing::Span, - span_creator: SpanCreatorFunc, + let attempt_id: history::AttemptId = match &self.current_attempt_id { + Some(id) => *id, + None => return, + }; + + history_listener.log_attempt_success(attempt_id); + } + + fn log_attempt_error(&mut self, error: &QueryError, retry_decision: &RetryDecision) { + let 
history_listener: &dyn HistoryListener = match &self.history_listener { + Some(hl) => &**hl, + None => return, + }; + + let attempt_id: history::AttemptId = match &self.current_attempt_id { + Some(id) => *id, + None => return, + }; + + history_listener.log_attempt_error(attempt_id, error, retry_decision); + } } -impl RowIteratorWorker<'_, QueryFunc, SpanCreator> +/// A massively simplified version of the RowIteratorWorker. It does not have +/// any complicated logic related to retries, it just fetches pages from +/// a single connection. +struct SingleConnectionRowIteratorWorker { + sender: ProvingSender>, + fetcher: Fetcher, +} + +impl SingleConnectionRowIteratorWorker where - QueryFunc: Fn(Arc, Consistency, PagingState) -> QueryFut, - QueryFut: Future>, - SpanCreator: Fn() -> RequestSpan, + Fetcher: Fn(PagingState) -> FetchFut + Send + Sync, + FetchFut: Future> + Send, { - // Contract: this function MUST send at least one item through self.sender - async fn work(mut self, cluster_data: Arc) -> PageSendAttemptedProof { - let load_balancer = self.execution_profile.load_balancing_policy.clone(); - let statement_info = self.statement_info.clone(); - let query_plan = - load_balancing::Plan::new(load_balancer.as_ref(), &statement_info, &cluster_data); + async fn work(mut self) -> PageSendAttemptedProof { + match self.do_work().await { + Ok(proof) => proof, + Err(err) => { + let (proof, _) = self.sender.send(Err(err)).await; + proof + } + } + } - let mut last_error: QueryError = QueryError::EmptyPlan; - let mut current_consistency: Consistency = self.query_consistency; + async fn do_work(&mut self) -> Result { + let mut paging_state = PagingState::start(); + loop { + let result = (self.fetcher)(paging_state).await?; + let response = result.into_non_error_query_response()?; + match response.response { + NonErrorResponse::Result(result::Result::Rows((rows, paging_state_response))) => { + let (proof, send_result) = self + .sender + .send(Ok(ReceivedPage { + rows, + tracing_id: response.tracing_id, + })) + .await; - self.log_query_start(); + if send_result.is_err() { + // channel was closed, RowIterator was dropped - should shutdown + return Ok(proof); + } - 'nodes_in_plan: for (node, shard) in query_plan { - let span = - trace_span!(parent: &self.parent_span, "Executing query", node = %node.address); - // For each node in the plan choose a connection to use - // This connection will be reused for same node retries to preserve paging cache on the shard - let connection: Arc = match node - .connection_for_shard(shard) - .instrument(span.clone()) - .await - { - Ok(connection) => connection, - Err(e) => { - trace!( - parent: &span, - error = %e, - "Choosing connection failed" - ); - last_error = e.into(); - // Broken connection doesn't count as a failed query, don't log in metrics - continue 'nodes_in_plan; + match paging_state_response.into_paging_control_flow() { + ControlFlow::Continue(new_paging_state) => { + paging_state = new_paging_state; + } + ControlFlow::Break(()) => { + // Reached the last query, shutdown + return Ok(proof); + } + } } - }; + NonErrorResponse::Result(_) => { + // We have most probably sent a modification statement (e.g. INSERT or UPDATE), + // so let's return an empty iterator as suggested in #631. 
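`SingleConnectionRowIteratorWorker` is generic over a `Fn(PagingState) -> Future` fetcher, which is how one loop serves both `query_iter` and `execute_iter` on a single connection. A reduced sketch of that shape (`u32` and `String` stand in for `PagingState` and a page; assumes a tokio runtime):

```rust
use std::future::Future;

// Generic pager loop, parameterized by a fetcher closure as in the diff.
async fn drive<F, Fut>(fetcher: F) -> Vec<String>
where
    F: Fn(u32) -> Fut,
    Fut: Future<Output = (String, Option<u32>)>,
{
    let mut state = 0;
    let mut pages = Vec::new();
    loop {
        let (page, next) = fetcher(state).await;
        pages.push(page);
        match next {
            Some(n) => state = n,
            None => return pages,
        }
    }
}

#[tokio::main]
async fn main() {
    let pages = drive(|state| async move {
        // Yield three pages, then report "no more pages".
        (format!("page {state}"), (state < 2).then(|| state + 1))
    })
    .await;
    assert_eq!(pages, ["page 0", "page 1", "page 2"]);
}
```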
- 'same_node_retries: loop { - trace!(parent: &span, "Execution started"); - // Query pages until an error occurs - let queries_result: Result = self - .query_pages(&connection, current_consistency, node) - .instrument(span.clone()) - .await; + // We must attempt to send something because the iterator expects it. + let (proof, _) = self.sender.send_empty_page(response.tracing_id).await; + return Ok(proof); + } + _ => { + return Err(ProtocolError::UnexpectedResponse( + response.response.to_response_kind(), + ) + .into()); + } + } + } + } +} - last_error = match queries_result { - Ok(proof) => { - trace!(parent: &span, "Query succeeded"); - // query_pages returned Ok, so we are guaranteed - // that it attempted to send at least one page - // through self.sender and we can safely return now. - return proof; - } - Err(error) => { - trace!( - parent: &span, - error = %error, - "Query failed" - ); - error - } - }; - - // Use retry policy to decide what to do next - let query_info = QueryInfo { - error: &last_error, - is_idempotent: self.query_is_idempotent, - consistency: self.query_consistency, - }; +/// An intermediate object that allows to construct an iterator over a query +/// that is asynchronously paged in the background. +/// +/// Before the results can be processed in a convenient way, the QueryPager +/// needs to be cast into a typed iterator. This is done by use of `into_typed()` method. +/// As the method is generic over the target type, the turbofish syntax +/// can come in handy there, e.g. `raw_iter.into_typed::<(i32, &str, Uuid)>()`. +/// +/// A pre-0.15.0 interface is also available, although deprecated: +/// `into_legacy()` method converts QueryPager to LegacyRowIterator, +/// enabling Stream'ed operation on rows being eagerly deserialized +/// to the middle-man [Row] type. This is inefficient, especially if +/// [Row] is not the intended target type. +pub struct QueryPager { + current_page: RawRowLendingIterator, + page_receiver: mpsc::Receiver>, + tracing_ids: Vec, +} - let retry_decision = self.retry_session.decide_should_retry(query_info); - trace!( - parent: &span, - retry_decision = format!("{:?}", retry_decision).as_str() - ); - self.log_attempt_error(&last_error, &retry_decision); - match retry_decision { - RetryDecision::RetrySameNode(cl) => { - self.metrics.inc_retries_num(); - current_consistency = cl.unwrap_or(current_consistency); - continue 'same_node_retries; - } - RetryDecision::RetryNextNode(cl) => { - self.metrics.inc_retries_num(); - current_consistency = cl.unwrap_or(current_consistency); - continue 'nodes_in_plan; - } - RetryDecision::DontRetry => break 'nodes_in_plan, - RetryDecision::IgnoreWriteError => { - warn!("Ignoring error during fetching pages; stopping fetching."); - // If we are here then, most likely, we didn't send - // anything through the self.sender channel. - // Although we are in an awkward situation (_iter - // interface isn't meant for sending writes), - // we must attempt to send something because - // the iterator expects it. - let (proof, _) = self.sender.send_empty_page(None).await; - return proof; - } - }; - } +// QueryPager is not an iterator or a stream! However, it implements +// a `next()` method that returns a [ColumnIterator], which can be used +// to manually deserialize a row. +// The `ColumnIterator` borrows from the `QueryPager`, and the [futures::Stream] trait +// does not allow for such a pattern. Lending streams are not a thing yet. +impl QueryPager { + /// Returns the next item (`ColumnIterator`) from the stream. 
+ /// + /// This can be used with `type_check() for manual deserialization - see example below. + /// + /// This is not a part of the `Stream` interface because the returned iterator + /// borrows from self. + /// + /// This is cancel-safe. + async fn next(&mut self) -> Option> { + let res = std::future::poll_fn(|cx| Pin::new(&mut *self).poll_fill_page(cx)).await; + match res { + Some(Ok(())) => {} + Some(Err(err)) => return Some(Err(err)), + None => return None, } - // Send last_error to RowIterator - query failed fully - self.log_query_error(&last_error); - let (proof, _) = self.sender.send(Err(last_error)).await; - proof + // We are guaranteed here to have a non-empty page, so unwrap + Some( + self.current_page + .next() + .unwrap() + .map_err(|e| RowsParseError::from(e).into()), + ) } - // Given a working connection query as many pages as possible until the first error. - // - // Contract: this function must either: - // - Return an error - // - Return Ok but have attempted to send a page via self.sender - async fn query_pages( - &mut self, - connection: &Arc, - consistency: Consistency, - node: NodeRef<'_>, - ) -> Result { - loop { - let request_span = (self.span_creator)(); - match self - .query_one_page(connection, consistency, node, &request_span) - .instrument(request_span.span().clone()) - .await? - { - ControlFlow::Break(proof) => return Ok(proof), - ControlFlow::Continue(_) => {} - } + /// Tries to acquire a non-empty page, if current page is exhausted. + fn poll_fill_page<'r>( + mut self: Pin<&'r mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + if !self.is_current_page_exhausted() { + return Poll::Ready(Some(Ok(()))); + } + ready_some_ok!(self.as_mut().poll_next_page(cx)); + if self.is_current_page_exhausted() { + // We most likely got a zero-sized page. + // Try again later. + cx.waker().wake_by_ref(); + Poll::Pending + } else { + Poll::Ready(Some(Ok(()))) } } - async fn query_one_page( - &mut self, - connection: &Arc, - consistency: Consistency, - node: NodeRef<'_>, - request_span: &RequestSpan, - ) -> Result, QueryError> { - self.metrics.inc_total_paged_queries(); - let query_start = std::time::Instant::now(); + /// Makes an attempt to acquire the next page (which may be empty). + /// + /// On success, returns Some(Ok()). + /// On failure, returns Some(Err()). + /// If there are no more pages, returns None. + fn poll_next_page<'r>( + mut self: Pin<&'r mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let mut s = self.as_mut(); - trace!( - connection = %connection.get_connect_address(), - "Sending" - ); - self.log_attempt_start(connection.get_connect_address()); + let received_page = ready_some_ok!(Pin::new(&mut s.page_receiver).poll_recv(cx)); + let raw_rows_with_deserialized_metadata = received_page.rows.deserialize_metadata()?; + s.current_page = RawRowLendingIterator::new(raw_rows_with_deserialized_metadata); - let query_response = - (self.page_query)(connection.clone(), consistency, self.paging_state.clone()) - .await - .and_then(QueryResponse::into_non_error_query_response); + if let Some(tracing_id) = received_page.tracing_id { + s.tracing_ids.push(tracing_id); + } - let elapsed = query_start.elapsed(); + Poll::Ready(Some(Ok(()))) + } - request_span.record_shard_id(connection); + /// Type-checks the iterator against given type. + /// + /// This is automatically called upon transforming [QueryPager] into [TypedRowLendingStream]. + /// Can be used with `next()` for manual deserialization. See `next()` for an example. 
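As the doc comment above promises an example: a sketch of the manual flow, pairing `type_check()` with `next()`. Note that in this diff `next()` is private to the iterator module, so outside the crate the typed streams below are the supported route; the import paths and error plumbing here are assumptions:

```rust
use scylla::transport::iterator::QueryPager;
use scylla_cql::types::deserialize::row::DeserializeRow;

async fn sum_first_column(
    mut pager: QueryPager,
) -> Result<i64, Box<dyn std::error::Error>> {
    // Check the row shape once, up front, instead of on every row.
    pager.type_check::<(i64,)>()?;

    let mut sum = 0;
    while let Some(res) = pager.next().await {
        // `res?` yields a ColumnIterator; deserialize it manually.
        let (value,): (i64,) = DeserializeRow::deserialize(res?)?;
        sum += value;
    }
    Ok(sum)
}
```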
+ #[inline] + pub fn type_check<'frame, 'metadata, RowT: DeserializeRow<'frame, 'metadata>>( + &self, + ) -> Result<(), TypeCheckError> { + RowT::type_check(self.column_specs().inner()) + } - match query_response { - Ok(NonErrorQueryResponse { - response: NonErrorResponse::Result(result::Result::Rows(mut rows)), - tracing_id, - .. - }) => { - let _ = self.metrics.log_query_latency(elapsed.as_millis() as u64); - self.log_attempt_success(); - self.log_query_success(); - self.execution_profile - .load_balancing_policy - .on_query_success(&self.statement_info, elapsed, node); + /// Casts the iterator to a given row type, enabling Stream'ed operations + /// on rows, which deserialize them on-the-fly to that given type. + /// It allows deserializing borrowed types, but hence cannot implement [Stream] + /// (because [Stream] is not lending). + /// Begins with performing type check. + #[inline] + pub fn rows_lending_stream<'frame, 'metadata, RowT: DeserializeRow<'frame, 'metadata>>( + self, + ) -> Result, TypeCheckError> + where + 'frame: 'metadata, + { + TypedRowLendingStream::::new(self) + } - let paging_state_response = rows.paging_state_response.take(); + /// Casts the iterator to a given row type, enabling [Stream]'ed operations + /// on rows, which deserialize them on-the-fly to that given type. + /// It only allows deserializing owned types, because [Stream] is not lending. + /// Begins with performing type check. + #[inline] + pub fn rows_stream<'frame, 'metadata, RowT: 'static + DeserializeRow<'frame, 'metadata>>( + self, + ) -> Result, TypeCheckError> + where + 'frame: 'metadata, + { + TypedRowLendingStream::::new(self).map(|typed_row_lending_stream| TypedRowStream { + typed_row_lending_stream, + }) + } - request_span.record_rows_fields(&rows); + /// Converts this iterator into an iterator over rows parsed as given type, + /// using the legacy deserialization framework. + /// This is inefficient, because all rows are being eagerly deserialized + /// to a middle-man [Row] type. 
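Of the three conversions (`rows_lending_stream`, `rows_stream`, `into_legacy`), `rows_stream()` is the one that yields a true `futures::Stream`. A sketch of consuming it with `TryStreamExt`, assuming a `QueryPager` already in hand:

```rust
use futures::TryStreamExt;
use scylla::transport::iterator::QueryPager;

async fn collect_pairs(
    pager: QueryPager,
) -> Result<Vec<(i32, String)>, Box<dyn std::error::Error>> {
    // rows_stream() type-checks once and then yields owned rows, so the
    // result implements Stream and works with the futures combinators.
    let mut stream = pager.rows_stream::<(i32, String)>()?;

    let mut out = Vec::new();
    while let Some(row) = stream.try_next().await? {
        out.push(row);
    }
    Ok(out)
}
```

For borrowed row types such as `&str`, `rows_lending_stream()` offers analogous `next()`/`try_next()` calls, just without the `Stream` impl.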
+ #[inline] + pub fn into_legacy(self) -> LegacyRowIterator { + LegacyRowIterator::new(self) + } - let received_page = ReceivedPage { rows, tracing_id }; + pub(crate) async fn new_for_query( + query: Query, + execution_profile: Arc, + cluster_data: Arc, + metrics: Arc, + ) -> Result { + let (sender, receiver) = mpsc::channel(1); - // Send next page to RowIterator - let (proof, res) = self.sender.send(Ok(received_page)).await; - if res.is_err() { - // channel was closed, RowIterator was dropped - should shutdown - return Ok(ControlFlow::Break(proof)); + let consistency = query + .config + .consistency + .unwrap_or(execution_profile.consistency); + let serial_consistency = query + .config + .serial_consistency + .unwrap_or(execution_profile.serial_consistency); + + let page_size = query.get_validated_page_size(); + + let routing_info = RoutingInfo { + consistency, + serial_consistency, + ..Default::default() + }; + + let retry_session = query + .get_retry_policy() + .map(|rp| &**rp) + .unwrap_or(&*execution_profile.retry_policy) + .new_session(); + + let parent_span = tracing::Span::current(); + let worker_task = async move { + let query_ref = &query; + + let page_query = |connection: Arc, + consistency: Consistency, + paging_state: PagingState| { + async move { + connection + .query_raw_with_consistency( + query_ref, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + .await } + }; - match paging_state_response.into_paging_control_flow() { - ControlFlow::Continue(paging_state) => { - self.paging_state = paging_state; - } - ControlFlow::Break(()) => { - // Reached the last query, shutdown - return Ok(ControlFlow::Break(proof)); - } + let query_ref = &query; + + let span_creator = move || { + let span = RequestSpan::new_query(&query_ref.contents); + span.record_request_size(0); + span + }; + + let worker = RowIteratorWorker { + sender: sender.into(), + page_query, + statement_info: routing_info, + query_is_idempotent: query.config.is_idempotent, + query_consistency: consistency, + retry_session, + execution_profile, + metrics, + paging_state: PagingState::start(), + history_listener: query.config.history_listener.clone(), + current_query_id: None, + current_attempt_id: None, + parent_span, + span_creator, + }; + + worker.work(cluster_data).await + }; + + Self::new_from_worker_future(worker_task, receiver).await + } + + pub(crate) async fn new_for_prepared_statement( + config: PreparedIteratorConfig, + ) -> Result { + let (sender, receiver) = mpsc::channel(1); + + let consistency = config + .prepared + .config + .consistency + .unwrap_or(config.execution_profile.consistency); + let serial_consistency = config + .prepared + .config + .serial_consistency + .unwrap_or(config.execution_profile.serial_consistency); + + let page_size = config.prepared.get_validated_page_size(); + + let retry_session = config + .prepared + .get_retry_policy() + .map(|rp| &**rp) + .unwrap_or(&*config.execution_profile.retry_policy) + .new_session(); + + let parent_span = tracing::Span::current(); + let worker_task = async move { + let prepared_ref = &config.prepared; + let values_ref = &config.values; + + let (partition_key, token) = match prepared_ref + .extract_partition_key_and_calculate_token( + prepared_ref.get_partitioner_name(), + values_ref, + ) { + Ok(res) => res.unzip(), + Err(err) => { + let (proof, _res) = ProvingSender::from(sender).send(Err(err)).await; + return proof; } + }; - // Query succeeded, reset retry policy for future retries - self.retry_session.reset(); - 
self.log_query_start(); + let table_spec = config.prepared.get_table_spec(); + let statement_info = RoutingInfo { + consistency, + serial_consistency, + token, + table: table_spec, + is_confirmed_lwt: config.prepared.is_confirmed_lwt(), + }; - Ok(ControlFlow::Continue(())) - } - Err(err) => { - let err = err.into(); - self.metrics.inc_failed_paged_queries(); - self.execution_profile - .load_balancing_policy - .on_query_failure(&self.statement_info, elapsed, node, &err); - Err(err) - } - Ok(NonErrorQueryResponse { - response: NonErrorResponse::Result(_), - tracing_id, - .. - }) => { - // We have most probably sent a modification statement (e.g. INSERT or UPDATE), - // so let's return an empty iterator as suggested in #631. + let page_query = |connection: Arc, + consistency: Consistency, + paging_state: PagingState| async move { + connection + .execute_raw_with_consistency( + prepared_ref, + values_ref, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + .await + }; + + let serialized_values_size = config.values.buffer_size(); + + let replicas: Option> = + if let (Some(table_spec), Some(token)) = + (statement_info.table, statement_info.token) + { + Some( + config + .cluster_data + .get_token_endpoints_iter(table_spec, token) + .map(|(node, shard)| (node.clone(), shard)) + .collect(), + ) + } else { + None + }; + + let span_creator = move || { + let span = RequestSpan::new_prepared( + partition_key.as_ref().map(|pk| pk.iter()), + token, + serialized_values_size, + ); + if let Some(replicas) = replicas.as_ref() { + span.record_replicas(replicas); + } + span + }; + + let worker = RowIteratorWorker { + sender: sender.into(), + page_query, + statement_info, + query_is_idempotent: config.prepared.config.is_idempotent, + query_consistency: consistency, + retry_session, + execution_profile: config.execution_profile, + metrics: config.metrics, + paging_state: PagingState::start(), + history_listener: config.prepared.config.history_listener.clone(), + current_query_id: None, + current_attempt_id: None, + parent_span, + span_creator, + }; - // We must attempt to send something because the iterator expects it. 
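`new_from_worker_future` spawns the worker as a detached task and immediately awaits the first page; the `unwrap` is justified by the send-at-least-once proof. A toy version of the channel choreography (capacity 1 provides the backpressure; assumes tokio with the rt and macros features):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Capacity 1, as in the diff: the worker can prefetch at most one
    // page beyond what the consumer has taken.
    let (sender, mut receiver) = mpsc::channel::<Result<&'static str, String>>(1);

    // Detached worker task, like tokio::task::spawn(worker_task).
    tokio::task::spawn(async move {
        for page in ["page 0", "page 1"] {
            if sender.send(Ok(page)).await.is_err() {
                return; // receiver dropped, i.e. the pager was dropped - shut down
            }
        }
    });

    // The driver unwraps here because SendAttemptedProof guarantees the
    // worker sent at least one item before finishing.
    let first = receiver.recv().await.unwrap();
    assert_eq!(first, Ok("page 0"));
}
```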
- let (proof, _) = self.sender.send_empty_page(tracing_id).await; - Ok(ControlFlow::Break(proof)) - } - Ok(response) => { - self.metrics.inc_failed_paged_queries(); - let err = - ProtocolError::UnexpectedResponse(response.response.to_response_kind()).into(); - self.execution_profile - .load_balancing_policy - .on_query_failure(&self.statement_info, elapsed, node, &err); - Err(err) - } - } + worker.work(config.cluster_data).await + }; + + Self::new_from_worker_future(worker_task, receiver).await } - fn log_query_start(&mut self) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, + pub(crate) async fn new_for_connection_query_iter( + query: Query, + connection: Arc, + consistency: Consistency, + serial_consistency: Option, + ) -> Result { + let (sender, receiver) = mpsc::channel::>(1); + + let page_size = query.get_validated_page_size(); + + let worker_task = async move { + let worker = SingleConnectionRowIteratorWorker { + sender: sender.into(), + fetcher: |paging_state| { + connection.query_raw_with_consistency( + &query, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + }, + }; + worker.work().await }; - self.current_query_id = Some(history_listener.log_query_start()); + Self::new_from_worker_future(worker_task, receiver).await } - fn log_query_success(&mut self) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + pub(crate) async fn new_for_connection_execute_iter( + prepared: PreparedStatement, + values: SerializedValues, + connection: Arc, + consistency: Consistency, + serial_consistency: Option, + ) -> Result { + let (sender, receiver) = mpsc::channel::>(1); - let query_id: history::QueryId = match &self.current_query_id { - Some(id) => *id, - None => return, + let page_size = prepared.get_validated_page_size(); + + let worker_task = async move { + let worker = SingleConnectionRowIteratorWorker { + sender: sender.into(), + fetcher: |paging_state| { + connection.execute_raw_with_consistency( + &prepared, + &values, + consistency, + serial_consistency, + Some(page_size), + paging_state, + ) + }, + }; + worker.work().await }; - history_listener.log_query_success(query_id); + Self::new_from_worker_future(worker_task, receiver).await } - fn log_query_error(&mut self, error: &QueryError) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + async fn new_from_worker_future( + worker_task: impl Future + Send + 'static, + mut receiver: mpsc::Receiver>, + ) -> Result { + tokio::task::spawn(worker_task); - let query_id: history::QueryId = match &self.current_query_id { - Some(id) => *id, - None => return, - }; + // This unwrap is safe because: + // - The future returned by worker.work sends at least one item + // to the channel (the PageSendAttemptedProof helps enforce this) + // - That future is polled in a tokio::task which isn't going to be + // cancelled + let page_received = receiver.recv().await.unwrap()?; + let raw_rows_with_deserialized_metadata = page_received.rows.deserialize_metadata()?; - history_listener.log_query_error(query_id, error); + Ok(Self { + current_page: RawRowLendingIterator::new(raw_rows_with_deserialized_metadata), + page_receiver: receiver, + tracing_ids: if let Some(tracing_id) = page_received.tracing_id { + vec![tracing_id] + } else { + Vec::new() + }, + }) } - fn log_attempt_start(&mut self, node_addr: SocketAddr) { - let 
history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + /// If tracing was enabled returns tracing ids of all finished page queries + #[inline] + pub fn tracing_ids(&self) -> &[Uuid] { + &self.tracing_ids + } - let query_id: history::QueryId = match &self.current_query_id { - Some(id) => *id, - None => return, - }; + /// Returns specification of row columns + #[inline] + pub fn column_specs(&self) -> ColumnSpecs<'_> { + ColumnSpecs::new(self.current_page.metadata().col_specs()) + } - self.current_attempt_id = - Some(history_listener.log_attempt_start(query_id, None, node_addr)); + fn is_current_page_exhausted(&self) -> bool { + self.current_page.rows_remaining() == 0 } +} - fn log_attempt_success(&mut self) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; +/// Returned by [QueryPager::rows_lending_stream]. +/// +/// Does not implement [Stream], but permits deserialization of borrowed types. +/// To use [Stream] API (only accessible for owned types), use [QueryPager::rows_stream]. +pub struct TypedRowLendingStream { + raw_row_lending_stream: QueryPager, + _phantom: std::marker::PhantomData, +} - let attempt_id: history::AttemptId = match &self.current_attempt_id { - Some(id) => *id, - None => return, - }; +impl Unpin for TypedRowLendingStream {} - history_listener.log_attempt_success(attempt_id); +impl TypedRowLendingStream { + /// If tracing was enabled, returns tracing ids of all finished page queries. + #[inline] + pub fn tracing_ids(&self) -> &[Uuid] { + self.raw_row_lending_stream.tracing_ids() } - fn log_attempt_error(&mut self, error: &QueryError, retry_decision: &RetryDecision) { - let history_listener: &dyn HistoryListener = match &self.history_listener { - Some(hl) => &**hl, - None => return, - }; + /// Returns specification of row columns + #[inline] + pub fn column_specs(&self) -> ColumnSpecs { + self.raw_row_lending_stream.column_specs() + } +} - let attempt_id: history::AttemptId = match &self.current_attempt_id { - Some(id) => *id, - None => return, - }; +impl<'frame, 'metadata, RowT> TypedRowLendingStream +where + 'frame: 'metadata, + RowT: DeserializeRow<'frame, 'metadata>, +{ + fn new(raw_stream: QueryPager) -> Result { + raw_stream.type_check::()?; - history_listener.log_attempt_error(attempt_id, error, retry_decision); + Ok(Self { + raw_row_lending_stream: raw_stream, + _phantom: Default::default(), + }) + } + + /// Stream-like next() implementation for TypedRowLendingStream. + /// + /// It also works with borrowed types! For example, &str is supported. + /// However, this is not a Stream. To create a Stream, use `into_stream()`. + #[inline] + pub async fn next(&'frame mut self) -> Option> { + self.raw_row_lending_stream.next().await.map(|res| { + res.and_then(|column_iterator| { + ::deserialize(column_iterator) + .map_err(|err| RowsParseError::from(err).into()) + }) + }) + } + + /// Stream-like try_next() implementation for TypedRowLendingStream. + /// + /// It also works with borrowed types! For example, &str is supported. + /// However, this is not a Stream. To create a Stream, use `into_stream()`. + #[inline] + pub async fn try_next(&'frame mut self) -> Result, QueryError> { + self.next().await.transpose() } } -/// A massively simplified version of the RowIteratorWorker. It does not have -/// any complicated logic related to retries, it just fetches pages from -/// a single connection. 
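`TypedRowLendingStream::next()` hands out items that borrow from the pager itself, which is precisely what `futures::Stream` cannot express. The restriction is easier to see on a tiny lending iterator over a string buffer:

```rust
struct LineLender {
    buf: String,
    pos: usize,
}

impl LineLender {
    // next() returns a slice borrowing from self, so two items can never
    // be alive at once - the same restriction QueryPager::next() has.
    fn next(&mut self) -> Option<&str> {
        if self.pos >= self.buf.len() {
            return None;
        }
        let rest = &self.buf[self.pos..];
        let end = rest.find('\n').unwrap_or(rest.len());
        self.pos += end + 1;
        Some(&rest[..end])
    }
}

fn main() {
    let mut lender = LineLender { buf: "a\nb".into(), pos: 0 };
    while let Some(line) = lender.next() {
        println!("{line}");
    }
}
```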
-struct SingleConnectionRowIteratorWorker { - sender: ProvingSender>, - fetcher: Fetcher, +/// Returned by [QueryPager::rows_stream]. +/// +/// Implements [Stream], but only permits deserialization of owned types. +/// To use [Stream] API (only accessible for owned types), use [QueryPager::rows_stream]. +pub struct TypedRowStream { + typed_row_lending_stream: TypedRowLendingStream, } -impl SingleConnectionRowIteratorWorker +impl Unpin for TypedRowStream {} + +impl TypedRowStream { + /// If tracing was enabled, returns tracing ids of all finished page queries. + #[inline] + pub fn tracing_ids(&self) -> &[Uuid] { + self.typed_row_lending_stream.tracing_ids() + } + + /// Returns specification of row columns + #[inline] + pub fn column_specs(&self) -> ColumnSpecs { + self.typed_row_lending_stream.column_specs() + } +} + +/// Stream implementation for TypedRowStream. +/// +/// It only works with owned types! For example, &str is not supported. +impl Stream for TypedRowStream where - Fetcher: Fn(PagingState) -> FetchFut + Send + Sync, - FetchFut: Future> + Send, + RowT: for<'r> DeserializeRow<'r, 'r>, { - async fn work(mut self) -> PageSendAttemptedProof { - match self.do_work().await { - Ok(proof) => proof, - Err(err) => { - let (proof, _) = self.sender.send(Err(err)).await; - proof - } - } + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut s = self.as_mut(); + + let next_fut = s.typed_row_lending_stream.next(); + futures::pin_mut!(next_fut); + let value = ready_some_ok!(next_fut.poll(cx)); + Poll::Ready(Some(Ok(value))) } +} - async fn do_work(&mut self) -> Result { - let mut paging_state = PagingState::start(); - loop { - let result = (self.fetcher)(paging_state).await?; - let response = result.into_non_error_query_response()?; - match response.response { - NonErrorResponse::Result(result::Result::Rows(mut rows)) => { - let paging_state_response = rows.paging_state_response.take(); +mod legacy { + use super::*; - let (proof, send_result) = self - .sender - .send(Ok(ReceivedPage { - rows, - tracing_id: response.tracing_id, - })) - .await; + /// Iterator over rows returned by paged queries. + /// + /// Allows to easily access rows without worrying about handling multiple pages. + pub struct LegacyRowIterator { + raw_stream: QueryPager, + } - if send_result.is_err() { - // channel was closed, RowIterator was dropped - should shutdown - return Ok(proof); - } + impl Stream for LegacyRowIterator { + type Item = Result; - match paging_state_response.into_paging_control_flow() { - ControlFlow::Continue(new_paging_state) => { - paging_state = new_paging_state; - } - ControlFlow::Break(()) => { - // Reached the last query, shutdown - return Ok(proof); - } - } - } - NonErrorResponse::Result(_) => { - // We have most probably sent a modification statement (e.g. INSERT or UPDATE), - // so let's return an empty iterator as suggested in #631. + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut s = self.as_mut(); - // We must attempt to send something because the iterator expects it. 
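The `Stream` impl for `TypedRowStream` recreates the `next()` future on every poll and polls it in place via `futures::pin_mut!`. That is only sound because `next()` is cancel-safe, as its doc comment states. A self-contained sketch of the same trick:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::{Future, Stream, StreamExt};

struct Countdown(u32);

impl Countdown {
    // Cancel-safe: each call either completes its whole step or does
    // nothing, so dropping and recreating the future loses no state.
    async fn next_item(&mut self) -> Option<u32> {
        if self.0 == 0 {
            None
        } else {
            self.0 -= 1;
            Some(self.0)
        }
    }
}

impl Stream for Countdown {
    type Item = u32;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<u32>> {
        // Countdown is Unpin, so we can reach &mut Self, build the future
        // on the stack, and poll it once, like the diff's poll_next does.
        let this = self.get_mut();
        let fut = this.next_item();
        futures::pin_mut!(fut);
        fut.poll(cx)
    }
}

#[tokio::main]
async fn main() {
    let items: Vec<u32> = Countdown(3).collect().await;
    assert_eq!(items, [2, 1, 0]);
}
```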
- let (proof, _) = self.sender.send_empty_page(response.tracing_id).await; - return Ok(proof); - } - _ => { - return Err(ProtocolError::UnexpectedResponse( - response.response.to_response_kind(), - ) - .into()); - } - } + let next_fut = s.raw_stream.next(); + futures::pin_mut!(next_fut); + + let next_column_iter = ready_some_ok!(next_fut.poll(cx)); + + let next_ready_row = + Row::deserialize(next_column_iter).map_err(|e| RowsParseError::from(e).into()); + + Poll::Ready(Some(next_ready_row)) } } -} -/// Iterator over rows returned by paged queries -/// where each row is parsed as the given type\ -/// Returned by `RowIterator::into_typed` -pub struct TypedRowIterator { - row_iterator: RowIterator, - phantom_data: std::marker::PhantomData, -} + impl LegacyRowIterator { + pub(super) fn new(raw_stream: QueryPager) -> Self { + Self { raw_stream } + } -impl TypedRowIterator { - /// If tracing was enabled returns tracing ids of all finished page queries - pub fn get_tracing_ids(&self) -> &[Uuid] { - self.row_iterator.get_tracing_ids() + /// If tracing was enabled returns tracing ids of all finished page queries + pub fn get_tracing_ids(&self) -> &[Uuid] { + self.raw_stream.tracing_ids() + } + + /// Returns specification of row columns + pub fn get_column_specs(&self) -> &[ColumnSpec<'_>] { + self.raw_stream.column_specs().inner() + } + + pub fn into_typed(self) -> LegacyTypedRowIterator { + LegacyTypedRowIterator { + row_iterator: self, + _phantom_data: Default::default(), + } + } } - /// Returns specification of row columns - pub fn get_column_specs(&self) -> &[ColumnSpec<'static>] { - self.row_iterator.get_column_specs() + /// Iterator over rows returned by paged queries + /// where each row is parsed as the given type\ + /// Returned by `RowIterator::into_typed` + pub struct LegacyTypedRowIterator { + row_iterator: LegacyRowIterator, + _phantom_data: std::marker::PhantomData, } -} -/// Couldn't get next typed row from the iterator -#[derive(Error, Debug, Clone)] -pub enum NextRowError { - /// Query to fetch next page has failed - #[error(transparent)] - QueryError(#[from] QueryError), + impl LegacyTypedRowIterator { + /// If tracing was enabled returns tracing ids of all finished page queries + #[inline] + pub fn get_tracing_ids(&self) -> &[Uuid] { + self.row_iterator.get_tracing_ids() + } - /// Parsing values in row as given types failed - #[error(transparent)] - FromRowError(#[from] FromRowError), -} + /// Returns specification of row columns + #[inline] + pub fn get_column_specs(&self) -> &[ColumnSpec<'_>] { + self.row_iterator.get_column_specs() + } + } -/// Fetching pages is asynchronous so `TypedRowIterator` does not implement the `Iterator` trait.\ -/// Instead it uses the asynchronous `Stream` trait -impl Stream for TypedRowIterator { - type Item = Result; + /// Couldn't get next typed row from the iterator + #[derive(Error, Debug, Clone)] + pub enum NextRowError { + /// Query to fetch next page has failed + #[error(transparent)] + QueryError(#[from] QueryError), - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut s = self.as_mut(); + /// Parsing values in row as given types failed + #[error(transparent)] + FromRowError(#[from] FromRowError), + } - let next_elem: Option> = - match Pin::new(&mut s.row_iterator).poll_next(cx) { - Poll::Ready(next_elem) => next_elem, - Poll::Pending => return Poll::Pending, - }; + /// Fetching pages is asynchronous so `LegacyTypedRowIterator` does not implement the `Iterator` trait.\ + /// Instead it uses the asynchronous 
`Stream` trait + impl Stream for LegacyTypedRowIterator { + type Item = Result; - let next_ready: Option = match next_elem { - Some(Ok(next_row)) => Some(RowT::from_row(next_row).map_err(|e| e.into())), - Some(Err(e)) => Some(Err(e.into())), - None => None, - }; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut s = self.as_mut(); - Poll::Ready(next_ready) + let next_row = ready_some_ok!(Pin::new(&mut s.row_iterator).poll_next(cx)); + let typed_row_res = RowT::from_row(next_row).map_err(|e| e.into()); + Poll::Ready(Some(typed_row_res)) + } } -} -// TypedRowIterator can be moved freely for any RowT so it's Unpin -impl Unpin for TypedRowIterator {} + // LegacyTypedRowIterator can be moved freely for any RowT so it's Unpin + impl Unpin for LegacyTypedRowIterator {} +} +pub use legacy::{LegacyRowIterator, LegacyTypedRowIterator, NextRowError}; diff --git a/scylla/src/transport/large_batch_statements_test.rs b/scylla/src/transport/large_batch_statements_test.rs index 7e8fc482c..33628a49d 100644 --- a/scylla/src/transport/large_batch_statements_test.rs +++ b/scylla/src/transport/large_batch_statements_test.rs @@ -7,7 +7,7 @@ use crate::transport::errors::{BadQuery, QueryError}; use crate::{ batch::Batch, test_utils::{create_new_session_builder, unique_keyspace_name}, - QueryResult, Session, + LegacyQueryResult, Session, }; #[tokio::test] @@ -51,7 +51,11 @@ async fn create_test_session(session: Session, ks: &String) -> Session { session } -async fn write_batch(session: &Session, n: usize, ks: &String) -> Result { +async fn write_batch( + session: &Session, + n: usize, + ks: &String, +) -> Result { let mut batch_query = Batch::new(BatchType::Unlogged); let mut batch_values = Vec::new(); let query = format!("INSERT INTO {}.pairs (dummy, k, v) VALUES (0, ?, ?)", ks); diff --git a/scylla/src/transport/legacy_query_result.rs b/scylla/src/transport/legacy_query_result.rs new file mode 100644 index 000000000..5b26f380c --- /dev/null +++ b/scylla/src/transport/legacy_query_result.rs @@ -0,0 +1,659 @@ +use crate::frame::response::cql_to_rust::{FromRow, FromRowError}; +use crate::frame::response::result::ColumnSpec; +use crate::frame::response::result::Row; +use scylla_cql::frame::response::result::{self, ResultMetadataHolder}; +use thiserror::Error; +use uuid::Uuid; + +/// Trait used to implement `Vec::into_typed` +// This is the only way to add custom method to Vec +pub trait IntoTypedRows { + fn into_typed(self) -> TypedRowIter; +} + +// Adds method Vec::into_typed(self) +// It transforms the Vec into iterator mapping to custom row type +impl IntoTypedRows for Vec { + fn into_typed(self) -> TypedRowIter { + TypedRowIter { + row_iter: self.into_iter(), + phantom_data: Default::default(), + } + } +} + +/// Iterator over rows parsed as the given type\ +/// Returned by `rows.into_typed::<(...)>()` +pub struct TypedRowIter { + row_iter: std::vec::IntoIter, + phantom_data: std::marker::PhantomData, +} + +impl Iterator for TypedRowIter { + type Item = Result; + + fn next(&mut self) -> Option { + self.row_iter.next().map(RowT::from_row) + } +} + +/// Result of a single query\ +/// Contains all rows returned by the database and some more information +#[derive(Debug)] +pub struct LegacyQueryResult { + /// Rows returned by the database.\ + /// Queries like `SELECT` will have `Some(Vec)`, while queries like `INSERT` will have `None`.\ + /// Can contain an empty Vec. 
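For the legacy path, a typical consumer of `LegacyQueryResult` looks like the following sketch (session setup elided; the `ks.pairs` table and `anyhow` error handling are assumptions):

```rust
use scylla::{LegacyQueryResult, Session};

async fn list_pairs(session: &Session) -> anyhow::Result<()> {
    let result: LegacyQueryResult = session
        .query_unpaged("SELECT k, v FROM ks.pairs", &[])
        .await?;

    // rows_typed() errors with RowsExpectedError if the statement was not
    // one that returns rows (e.g. an INSERT).
    for row in result.rows_typed::<(i32, String)>()? {
        let (k, v) = row?; // each row may still fail with FromRowError
        println!("{k}: {v}");
    }
    Ok(())
}
```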
+ pub rows: Option>, + /// Warnings returned by the database + pub warnings: Vec, + /// CQL Tracing uuid - can only be Some if tracing is enabled for this query + pub tracing_id: Option, + /// Metadata returned along with this response. + pub(crate) metadata: Option, + /// The original size of the serialized rows in request + pub serialized_size: usize, +} + +impl LegacyQueryResult { + pub(crate) fn mock_empty() -> Self { + Self { + rows: None, + warnings: Vec::new(), + tracing_id: None, + metadata: None, + serialized_size: 0, + } + } + + /// Returns the number of received rows.\ + /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). + pub fn rows_num(&self) -> Result { + match &self.rows { + Some(rows) => Ok(rows.len()), + None => Err(RowsExpectedError), + } + } + + /// Returns the received rows when present.\ + /// If `LegacyQueryResult.rows` is `None`, which means that this query is not supposed to return rows (e.g `INSERT`), returns an error.\ + /// Can return an empty `Vec`. + pub fn rows(self) -> Result, RowsExpectedError> { + match self.rows { + Some(rows) => Ok(rows), + None => Err(RowsExpectedError), + } + } + + /// Returns the received rows parsed as the given type.\ + /// Equal to `rows()?.into_typed()`.\ + /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). + pub fn rows_typed(self) -> Result, RowsExpectedError> { + Ok(self.rows()?.into_typed()) + } + + /// Returns `Ok` for a result of a query that shouldn't contain any rows.\ + /// Will return `Ok` for `INSERT` result, but a `SELECT` result, even an empty one, will cause an error.\ + /// Opposite of [`rows()`](LegacyQueryResult::rows). + pub fn result_not_rows(&self) -> Result<(), RowsNotExpectedError> { + match self.rows { + Some(_) => Err(RowsNotExpectedError), + None => Ok(()), + } + } + + /// Returns rows when `LegacyQueryResult.rows` is `Some`, otherwise an empty Vec.\ + /// Equal to `rows().unwrap_or_default()`. + pub fn rows_or_empty(self) -> Vec { + self.rows.unwrap_or_default() + } + + /// Returns rows parsed as the given type.\ + /// When `LegacyQueryResult.rows` is `None`, returns 0 rows.\ + /// Equal to `rows_or_empty().into_typed::()`. + pub fn rows_typed_or_empty(self) -> TypedRowIter { + self.rows_or_empty().into_typed::() + } + + /// Returns first row from the received rows.\ + /// When the first row is not available, returns an error. + pub fn first_row(self) -> Result { + match self.maybe_first_row()? { + Some(row) => Ok(row), + None => Err(FirstRowError::RowsEmpty), + } + } + + /// Returns first row from the received rows parsed as the given type.\ + /// When the first row is not available, returns an error. + pub fn first_row_typed(self) -> Result { + Ok(self.first_row()?.into_typed()?) + } + + /// Returns `Option` containing the first of a result.\ + /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). + pub fn maybe_first_row(self) -> Result, RowsExpectedError> { + Ok(self.rows()?.into_iter().next()) + } + + /// Returns `Option` containing the first of a result.\ + /// Fails when the query isn't of a type that could return rows, same as [`rows()`](LegacyQueryResult::rows). + pub fn maybe_first_row_typed( + self, + ) -> Result, MaybeFirstRowTypedError> { + match self.maybe_first_row()? 
{ + Some(row) => Ok(Some(row.into_typed::()?)), + None => Ok(None), + } + } + + /// Returns the only received row.\ + /// Fails if the result is anything else than a single row.\ + pub fn single_row(self) -> Result { + let rows: Vec = self.rows()?; + + if rows.len() != 1 { + return Err(SingleRowError::BadNumberOfRows(rows.len())); + } + + Ok(rows.into_iter().next().unwrap()) + } + + /// Returns the only received row parsed as the given type.\ + /// Fails if the result is anything else than a single row.\ + pub fn single_row_typed(self) -> Result { + Ok(self.single_row()?.into_typed::()?) + } + + /// Returns column specifications. + #[inline] + pub fn col_specs(&self) -> &[ColumnSpec<'_>] { + self.metadata + .as_ref() + .map(|metadata| metadata.inner().col_specs()) + .unwrap_or_default() + } + + /// Returns a column specification for a column with given name, or None if not found + #[inline] + pub fn get_column_spec<'a>(&'a self, name: &str) -> Option<(usize, &'a ColumnSpec<'_>)> { + self.col_specs() + .iter() + .enumerate() + .find(|(_id, spec)| spec.name() == name) + } +} + +/// [`LegacyQueryResult::rows()`](LegacyQueryResult::rows) or a similar function called on a bad LegacyQueryResult.\ +/// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ +/// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ +/// It is `None` for queries that can't return rows (e.g `INSERT`). +#[derive(Debug, Clone, Error, PartialEq, Eq)] +#[error( + "LegacyQueryResult::rows() or similar function called on a bad LegacyQueryResult. + Expected LegacyQueryResult.rows to be Some, but it was None. + LegacyQueryResult.rows is Some for queries that can return rows (e.g SELECT). + It is None for queries that can't return rows (e.g INSERT)." +)] +pub struct RowsExpectedError; + +/// [`LegacyQueryResult::result_not_rows()`](LegacyQueryResult::result_not_rows) called on a bad LegacyQueryResult.\ +/// Expected `LegacyQueryResult.rows` to be `None`, but it was `Some`.\ +/// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ +/// It is `None` for queries that can't return rows (e.g `INSERT`). +#[derive(Debug, Clone, Error, PartialEq, Eq)] +#[error( + "LegacyQueryResult::result_not_rows() called on a bad LegacyQueryResult. + Expected LegacyQueryResult.rows to be None, but it was Some. + LegacyQueryResult.rows is Some for queries that can return rows (e.g SELECT). + It is None for queries that can't return rows (e.g INSERT)." +)] +pub struct RowsNotExpectedError; + +#[derive(Debug, Clone, Error, PartialEq, Eq)] +pub enum FirstRowError { + /// [`LegacyQueryResult::first_row()`](LegacyQueryResult::first_row) called on a bad LegacyQueryResult.\ + /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\ + /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ + /// It is `None` for queries that can't return rows (e.g `INSERT`). 
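`col_specs()` and `get_column_spec()` expose the response metadata even on the legacy result. A small sketch of a by-name lookup (assumes `result` came from a statement that returns rows; otherwise the spec list is empty and the lookup yields `None`):

```rust
use scylla::LegacyQueryResult;

fn find_column(result: &LegacyQueryResult) {
    match result.get_column_spec("k") {
        Some((idx, spec)) => {
            println!("column '{}' is at index {}", spec.name(), idx)
        }
        None => println!("no column named 'k'"),
    }
}
```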
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+pub enum FirstRowError {
+    /// [`LegacyQueryResult::first_row()`](LegacyQueryResult::first_row) called on a bad LegacyQueryResult.\
+    /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\
+    /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g. `SELECT`).\
+    /// It is `None` for queries that can't return rows (e.g. `INSERT`).
+    #[error(transparent)]
+    RowsExpected(#[from] RowsExpectedError),
+
+    /// Rows in `LegacyQueryResult` are empty
+    #[error("Rows in LegacyQueryResult are empty")]
+    RowsEmpty,
+}
+
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+pub enum FirstRowTypedError {
+    /// [`LegacyQueryResult::first_row_typed()`](LegacyQueryResult::first_row_typed) called on a bad LegacyQueryResult.\
+    /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\
+    /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g. `SELECT`).\
+    /// It is `None` for queries that can't return rows (e.g. `INSERT`).
+    #[error(transparent)]
+    RowsExpected(#[from] RowsExpectedError),
+
+    /// Rows in `LegacyQueryResult` are empty
+    #[error("Rows in LegacyQueryResult are empty")]
+    RowsEmpty,
+
+    /// Parsing row as the given type failed
+    #[error(transparent)]
+    FromRowError(#[from] FromRowError),
+}
+
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+pub enum MaybeFirstRowTypedError {
+    /// [`LegacyQueryResult::maybe_first_row_typed()`](LegacyQueryResult::maybe_first_row_typed) called on a bad LegacyQueryResult.\
+    /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\
+    /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g. `SELECT`).\
+    /// It is `None` for queries that can't return rows (e.g. `INSERT`).
+    #[error(transparent)]
+    RowsExpected(#[from] RowsExpectedError),
+
+    /// Parsing row as the given type failed
+    #[error(transparent)]
+    FromRowError(#[from] FromRowError),
+}
+
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+pub enum SingleRowError {
+    /// [`LegacyQueryResult::single_row()`](LegacyQueryResult::single_row) called on a bad LegacyQueryResult.\
+    /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\
+    /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g. `SELECT`).\
+    /// It is `None` for queries that can't return rows (e.g. `INSERT`).
+    #[error(transparent)]
+    RowsExpected(#[from] RowsExpectedError),
+
+    /// Expected a single row, found a different number of rows
+    #[error("Expected a single row, found {0} rows")]
+    BadNumberOfRows(usize),
+}
+
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+pub enum SingleRowTypedError {
+    /// [`LegacyQueryResult::single_row_typed()`](LegacyQueryResult::single_row_typed) called on a bad LegacyQueryResult.\
+    /// Expected `LegacyQueryResult.rows` to be `Some`, but it was `None`.\
+    /// `LegacyQueryResult.rows` is `Some` for queries that can return rows (e.g. `SELECT`).\
+    /// It is `None` for queries that can't return rows (e.g. `INSERT`).
+ #[error(transparent)] + RowsExpected(#[from] RowsExpectedError), + + /// Expected a single row, found other number of rows + #[error("Expected a single row, found {0} rows")] + BadNumberOfRows(usize), + + /// Parsing row as the given type failed + #[error(transparent)] + FromRowError(#[from] FromRowError), +} + +impl From for FirstRowTypedError { + fn from(err: FirstRowError) -> FirstRowTypedError { + match err { + FirstRowError::RowsExpected(e) => FirstRowTypedError::RowsExpected(e), + FirstRowError::RowsEmpty => FirstRowTypedError::RowsEmpty, + } + } +} + +impl From for SingleRowTypedError { + fn from(err: SingleRowError) -> SingleRowTypedError { + match err { + SingleRowError::RowsExpected(e) => SingleRowTypedError::RowsExpected(e), + SingleRowError::BadNumberOfRows(r) => SingleRowTypedError::BadNumberOfRows(r), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + frame::response::result::{CqlValue, Row}, + test_utils::setup_tracing, + }; + use std::convert::TryInto; + use std::sync::Arc; + + use assert_matches::assert_matches; + use scylla_cql::frame::response::result::{ColumnType, ResultMetadata, TableSpec}; + + // Returns specified number of rows, each one containing one int32 value. + // Values are 0, 1, 2, 3, 4, ... + fn make_rows(rows_num: usize) -> Vec { + let mut rows: Vec = Vec::with_capacity(rows_num); + for cur_value in 0..rows_num { + let int_val: i32 = cur_value.try_into().unwrap(); + rows.push(Row { + columns: vec![Some(CqlValue::Int(int_val))], + }); + } + rows + } + + // Just like make_rows, but each column has one String value + // values are "val0", "val1", "val2", ... + fn make_string_rows(rows_num: usize) -> Vec { + let mut rows: Vec = Vec::with_capacity(rows_num); + for cur_value in 0..rows_num { + rows.push(Row { + columns: vec![Some(CqlValue::Text(format!("val{}", cur_value)))], + }); + } + rows + } + + fn make_test_metadata() -> ResultMetadata<'static> { + let table_spec = TableSpec::borrowed("some_keyspace", "some_table"); + + let column_spec = ColumnSpec::borrowed("column0", ColumnType::Int, table_spec); + + ResultMetadata::new_for_test(1, vec![column_spec]) + } + + fn make_not_rows_query_result() -> LegacyQueryResult { + LegacyQueryResult { + rows: None, + warnings: vec![], + tracing_id: None, + metadata: None, + serialized_size: 0, + } + } + + fn make_rows_query_result(rows_num: usize) -> LegacyQueryResult { + let mut res = make_not_rows_query_result(); + res.rows = Some(make_rows(rows_num)); + res.metadata = Some(ResultMetadataHolder::SharedCached(Arc::new( + make_test_metadata(), + ))); + res + } + + fn make_string_rows_query_result(rows_num: usize) -> LegacyQueryResult { + let mut res = make_not_rows_query_result(); + res.rows = Some(make_string_rows(rows_num)); + res.metadata = Some(ResultMetadataHolder::SharedCached(Arc::new( + make_test_metadata(), + ))); + res + } + + #[test] + fn rows_num_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().rows_num(), + Err(RowsExpectedError) + ); + assert_eq!(make_rows_query_result(0).rows_num(), Ok(0)); + assert_eq!(make_rows_query_result(1).rows_num(), Ok(1)); + assert_eq!(make_rows_query_result(2).rows_num(), Ok(2)); + assert_eq!(make_rows_query_result(3).rows_num(), Ok(3)); + } + + #[test] + fn rows_test() { + setup_tracing(); + assert_eq!(make_not_rows_query_result().rows(), Err(RowsExpectedError)); + assert_eq!(make_rows_query_result(0).rows(), Ok(vec![])); + assert_eq!(make_rows_query_result(1).rows(), Ok(make_rows(1))); + assert_eq!(make_rows_query_result(2).rows(), 
Ok(make_rows(2))); + } + + #[test] + fn rows_typed_test() { + setup_tracing(); + assert!(make_not_rows_query_result().rows_typed::<(i32,)>().is_err()); + + let rows0: Vec<(i32,)> = make_rows_query_result(0) + .rows_typed::<(i32,)>() + .unwrap() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows0, vec![]); + + let rows1: Vec<(i32,)> = make_rows_query_result(1) + .rows_typed::<(i32,)>() + .unwrap() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows1, vec![(0,)]); + + let rows2: Vec<(i32,)> = make_rows_query_result(2) + .rows_typed::<(i32,)>() + .unwrap() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows2, vec![(0,), (1,)]); + } + + #[test] + fn result_not_rows_test() { + setup_tracing(); + assert_eq!(make_not_rows_query_result().result_not_rows(), Ok(())); + assert_eq!( + make_rows_query_result(0).result_not_rows(), + Err(RowsNotExpectedError) + ); + assert_eq!( + make_rows_query_result(1).result_not_rows(), + Err(RowsNotExpectedError) + ); + assert_eq!( + make_rows_query_result(2).result_not_rows(), + Err(RowsNotExpectedError) + ); + } + + #[test] + fn rows_or_empty_test() { + setup_tracing(); + assert_eq!(make_not_rows_query_result().rows_or_empty(), vec![]); + assert_eq!(make_rows_query_result(0).rows_or_empty(), make_rows(0)); + assert_eq!(make_rows_query_result(1).rows_or_empty(), make_rows(1)); + assert_eq!(make_rows_query_result(2).rows_or_empty(), make_rows(2)); + } + + #[test] + fn rows_typed_or_empty() { + setup_tracing(); + let rows_empty: Vec<(i32,)> = make_not_rows_query_result() + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows_empty, vec![]); + + let rows0: Vec<(i32,)> = make_rows_query_result(0) + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows0, vec![]); + + let rows1: Vec<(i32,)> = make_rows_query_result(1) + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows1, vec![(0,)]); + + let rows2: Vec<(i32,)> = make_rows_query_result(2) + .rows_typed_or_empty::<(i32,)>() + .map(|r| r.unwrap()) + .collect(); + + assert_eq!(rows2, vec![(0,), (1,)]); + } + + #[test] + fn first_row_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().first_row(), + Err(FirstRowError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).first_row(), + Err(FirstRowError::RowsEmpty) + ); + assert_eq!( + make_rows_query_result(1).first_row(), + Ok(make_rows(1).into_iter().next().unwrap()) + ); + assert_eq!( + make_rows_query_result(2).first_row(), + Ok(make_rows(2).into_iter().next().unwrap()) + ); + assert_eq!( + make_rows_query_result(3).first_row(), + Ok(make_rows(3).into_iter().next().unwrap()) + ); + } + + #[test] + fn first_row_typed_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().first_row_typed::<(i32,)>(), + Err(FirstRowTypedError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).first_row_typed::<(i32,)>(), + Err(FirstRowTypedError::RowsEmpty) + ); + assert_eq!( + make_rows_query_result(1).first_row_typed::<(i32,)>(), + Ok((0,)) + ); + assert_eq!( + make_rows_query_result(2).first_row_typed::<(i32,)>(), + Ok((0,)) + ); + assert_eq!( + make_rows_query_result(3).first_row_typed::<(i32,)>(), + Ok((0,)) + ); + + assert_matches!( + make_string_rows_query_result(2).first_row_typed::<(i32,)>(), + Err(FirstRowTypedError::FromRowError(_)) + ); + } + + #[test] + fn maybe_first_row_test() { + setup_tracing(); + assert_eq!( + 
make_not_rows_query_result().maybe_first_row(), + Err(RowsExpectedError) + ); + assert_eq!(make_rows_query_result(0).maybe_first_row(), Ok(None)); + assert_eq!( + make_rows_query_result(1).maybe_first_row(), + Ok(Some(make_rows(1).into_iter().next().unwrap())) + ); + assert_eq!( + make_rows_query_result(2).maybe_first_row(), + Ok(Some(make_rows(2).into_iter().next().unwrap())) + ); + assert_eq!( + make_rows_query_result(3).maybe_first_row(), + Ok(Some(make_rows(3).into_iter().next().unwrap())) + ); + } + + #[test] + fn maybe_first_row_typed_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().maybe_first_row_typed::<(i32,)>(), + Err(MaybeFirstRowTypedError::RowsExpected(RowsExpectedError)) + ); + + assert_eq!( + make_rows_query_result(0).maybe_first_row_typed::<(i32,)>(), + Ok(None) + ); + + assert_eq!( + make_rows_query_result(1).maybe_first_row_typed::<(i32,)>(), + Ok(Some((0,))) + ); + + assert_eq!( + make_rows_query_result(2).maybe_first_row_typed::<(i32,)>(), + Ok(Some((0,))) + ); + + assert_eq!( + make_rows_query_result(3).maybe_first_row_typed::<(i32,)>(), + Ok(Some((0,))) + ); + + assert_matches!( + make_string_rows_query_result(1).maybe_first_row_typed::<(i32,)>(), + Err(MaybeFirstRowTypedError::FromRowError(_)) + ) + } + + #[test] + fn single_row_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().single_row(), + Err(SingleRowError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).single_row(), + Err(SingleRowError::BadNumberOfRows(0)) + ); + assert_eq!( + make_rows_query_result(1).single_row(), + Ok(make_rows(1).into_iter().next().unwrap()) + ); + assert_eq!( + make_rows_query_result(2).single_row(), + Err(SingleRowError::BadNumberOfRows(2)) + ); + assert_eq!( + make_rows_query_result(3).single_row(), + Err(SingleRowError::BadNumberOfRows(3)) + ); + } + + #[test] + fn single_row_typed_test() { + setup_tracing(); + assert_eq!( + make_not_rows_query_result().single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::RowsExpected(RowsExpectedError)) + ); + assert_eq!( + make_rows_query_result(0).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::BadNumberOfRows(0)) + ); + assert_eq!( + make_rows_query_result(1).single_row_typed::<(i32,)>(), + Ok((0,)) + ); + assert_eq!( + make_rows_query_result(2).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::BadNumberOfRows(2)) + ); + assert_eq!( + make_rows_query_result(3).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::BadNumberOfRows(3)) + ); + + assert_matches!( + make_string_rows_query_result(1).single_row_typed::<(i32,)>(), + Err(SingleRowTypedError::FromRowError(_)) + ); + } +} diff --git a/scylla/src/transport/mod.rs b/scylla/src/transport/mod.rs index 45befce15..be4cfa37b 100644 --- a/scylla/src/transport/mod.rs +++ b/scylla/src/transport/mod.rs @@ -7,6 +7,7 @@ pub mod errors; pub mod execution_profile; pub mod host_filter; pub mod iterator; +pub mod legacy_query_result; pub mod load_balancing; pub mod locator; pub(crate) mod metrics; diff --git a/scylla/src/transport/query_result.rs b/scylla/src/transport/query_result.rs index b60485afb..eedcb34a1 100644 --- a/scylla/src/transport/query_result.rs +++ b/scylla/src/transport/query_result.rs @@ -1,625 +1,774 @@ -use std::sync::Arc; +use std::fmt::Debug; -use crate::frame::response::cql_to_rust::{FromRow, FromRowError}; -use crate::frame::response::result::ColumnSpec; -use crate::frame::response::result::Row; -use crate::transport::session::{IntoTypedRows, TypedRowIter}; -use 
scylla_cql::frame::response::result::ResultMetadata; use thiserror::Error; use uuid::Uuid; -/// Result of a single query\ -/// Contains all rows returned by the database and some more information -#[derive(Debug)] -pub struct QueryResult { - /// Rows returned by the database.\ - /// Queries like `SELECT` will have `Some(Vec)`, while queries like `INSERT` will have `None`.\ - /// Can contain an empty Vec. - pub rows: Option>, - /// Warnings returned by the database - pub warnings: Vec, - /// CQL Tracing uuid - can only be Some if tracing is enabled for this query - pub tracing_id: Option, - /// Metadata returned along with this response. - pub(crate) metadata: Option>>, - /// The original size of the serialized rows in request - pub serialized_size: usize, +use scylla_cql::frame::frame_errors::RowsParseError; +use scylla_cql::frame::response::result::{ + ColumnSpec, ColumnType, DeserializedMetadataAndRawRows, RawMetadataAndRawRows, Row, TableSpec, +}; +use scylla_cql::types::deserialize::result::TypedRowIterator; +use scylla_cql::types::deserialize::row::DeserializeRow; +use scylla_cql::types::deserialize::{DeserializationError, TypeCheckError}; + +use super::legacy_query_result::LegacyQueryResult; + +/// A view over specification of a table in the database. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct TableSpecView<'res> { + table_name: &'res str, + ks_name: &'res str, } -impl QueryResult { - pub(crate) fn mock_empty() -> Self { +impl<'res> TableSpecView<'res> { + pub(crate) fn new_from_table_spec(spec: &'res TableSpec) -> Self { Self { - rows: None, - warnings: Vec::new(), - tracing_id: None, - metadata: None, - serialized_size: 0, + table_name: spec.table_name(), + ks_name: spec.ks_name(), } } - /// Returns the number of received rows.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn rows_num(&self) -> Result { - match &self.rows { - Some(rows) => Ok(rows.len()), - None => Err(RowsExpectedError), - } + /// The name of the table. + #[inline] + pub fn table_name(&self) -> &'res str { + self.table_name } - /// Returns the received rows when present.\ - /// If `QueryResult.rows` is `None`, which means that this query is not supposed to return rows (e.g `INSERT`), returns an error.\ - /// Can return an empty `Vec`. - pub fn rows(self) -> Result, RowsExpectedError> { - match self.rows { - Some(rows) => Ok(rows), - None => Err(RowsExpectedError), - } + /// The name of the keyspace the table resides in. + #[inline] + pub fn ks_name(&self) -> &'res str { + self.ks_name } +} - /// Returns the received rows parsed as the given type.\ - /// Equal to `rows()?.into_typed()`.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn rows_typed(self) -> Result, RowsExpectedError> { - Ok(self.rows()?.into_typed()) - } +/// A view over specification of a column returned by the database. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub struct ColumnSpecView<'res> { + table_spec: TableSpecView<'res>, + name: &'res str, + typ: &'res ColumnType<'res>, +} - /// Returns `Ok` for a result of a query that shouldn't contain any rows.\ - /// Will return `Ok` for `INSERT` result, but a `SELECT` result, even an empty one, will cause an error.\ - /// Opposite of [`rows()`](QueryResult::rows). 
- pub fn result_not_rows(&self) -> Result<(), RowsNotExpectedError> { - match self.rows { - Some(_) => Err(RowsNotExpectedError), - None => Ok(()), +impl<'res> ColumnSpecView<'res> { + pub(crate) fn new_from_column_spec(spec: &'res ColumnSpec) -> Self { + Self { + table_spec: TableSpecView::new_from_table_spec(spec.table_spec()), + name: spec.name(), + typ: spec.typ(), } } - /// Returns rows when `QueryResult.rows` is `Some`, otherwise an empty Vec.\ - /// Equal to `rows().unwrap_or_default()`. - pub fn rows_or_empty(self) -> Vec { - self.rows.unwrap_or_default() - } - - /// Returns rows parsed as the given type.\ - /// When `QueryResult.rows` is `None`, returns 0 rows.\ - /// Equal to `rows_or_empty().into_typed::()`. - pub fn rows_typed_or_empty(self) -> TypedRowIter { - self.rows_or_empty().into_typed::() + /// Returns a view over specification of the table the column is part of. + #[inline] + pub fn table_spec(&self) -> TableSpecView<'res> { + self.table_spec } - /// Returns first row from the received rows.\ - /// When the first row is not available, returns an error. - pub fn first_row(self) -> Result { - match self.maybe_first_row()? { - Some(row) => Ok(row), - None => Err(FirstRowError::RowsEmpty), - } + /// The column's name. + #[inline] + pub fn name(&self) -> &'res str { + self.name } - /// Returns first row from the received rows parsed as the given type.\ - /// When the first row is not available, returns an error. - pub fn first_row_typed(self) -> Result { - Ok(self.first_row()?.into_typed()?) + /// The column's CQL type. + #[inline] + pub fn typ(&self) -> &'res ColumnType { + self.typ } +} - /// Returns `Option` containing the first of a result.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn maybe_first_row(self) -> Result, RowsExpectedError> { - Ok(self.rows()?.into_iter().next()) - } +/// A view over specification of columns returned by the database. +#[derive(Debug, Clone, Copy)] +pub struct ColumnSpecs<'res> { + specs: &'res [ColumnSpec<'res>], +} - /// Returns `Option` containing the first of a result.\ - /// Fails when the query isn't of a type that could return rows, same as [`rows()`](QueryResult::rows). - pub fn maybe_first_row_typed( - self, - ) -> Result, MaybeFirstRowTypedError> { - match self.maybe_first_row()? { - Some(row) => Ok(Some(row.into_typed::()?)), - None => Ok(None), - } +impl<'res> ColumnSpecs<'res> { + pub(crate) fn new(specs: &'res [ColumnSpec<'res>]) -> Self { + Self { specs } } - /// Returns the only received row.\ - /// Fails if the result is anything else than a single row.\ - pub fn single_row(self) -> Result { - let rows: Vec = self.rows()?; - - if rows.len() != 1 { - return Err(SingleRowError::BadNumberOfRows(rows.len())); - } - - Ok(rows.into_iter().next().unwrap()) + pub(crate) fn inner(&self) -> &'res [ColumnSpec<'res>] { + self.specs } - /// Returns the only received row parsed as the given type.\ - /// Fails if the result is anything else than a single row.\ - pub fn single_row_typed(self) -> Result { - Ok(self.single_row()?.into_typed::()?) + /// Returns number of columns. + #[allow(clippy::len_without_is_empty)] + #[inline] + pub fn len(&self) -> usize { + self.specs.len() } - /// Returns column specifications. + /// Returns specification of k-th column returned from the database. 
#[inline] - pub fn col_specs(&self) -> &[ColumnSpec<'static>] { - self.metadata - .as_ref() - .map(|metadata| metadata.col_specs()) - .unwrap_or_default() + pub fn get_by_index(&self, k: usize) -> Option> { + self.specs.get(k).map(ColumnSpecView::new_from_column_spec) } - /// Returns a column specification for a column with given name, or None if not found + /// Returns specification of the column with given name returned from the database. #[inline] - pub fn get_column_spec<'a>(&'a self, name: &str) -> Option<(usize, &'a ColumnSpec<'static>)> { - self.col_specs() + pub fn get_by_name(&self, name: &str) -> Option<(usize, ColumnSpecView<'res>)> { + self.specs .iter() .enumerate() - .find(|(_id, spec)| spec.name() == name) + .find(|(_idx, spec)| spec.name() == name) + .map(|(idx, spec)| (idx, ColumnSpecView::new_from_column_spec(spec))) } -} -/// [`QueryResult::rows()`](QueryResult::rows) or a similar function called on a bad QueryResult.\ -/// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ -/// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ -/// It is `None` for queries that can't return rows (e.g `INSERT`). -#[derive(Debug, Clone, Error, PartialEq, Eq)] -#[error( - "QueryResult::rows() or similar function called on a bad QueryResult. - Expected QueryResult.rows to be Some, but it was None. - QueryResult.rows is Some for queries that can return rows (e.g SELECT). - It is None for queries that can't return rows (e.g INSERT)." -)] -pub struct RowsExpectedError; - -/// [`QueryResult::result_not_rows()`](QueryResult::result_not_rows) called on a bad QueryResult.\ -/// Expected `QueryResult.rows` to be `None`, but it was `Some`.\ -/// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ -/// It is `None` for queries that can't return rows (e.g `INSERT`). -#[derive(Debug, Clone, Error, PartialEq, Eq)] -#[error( - "QueryResult::result_not_rows() called on a bad QueryResult. - Expected QueryResult.rows to be None, but it was Some. - QueryResult.rows is Some for queries that can return rows (e.g SELECT). - It is None for queries that can't return rows (e.g INSERT)." -)] -pub struct RowsNotExpectedError; - -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum FirstRowError { - /// [`QueryResult::first_row()`](QueryResult::first_row) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Rows in `QueryResult` are empty - #[error("Rows in QueryResult are empty")] - RowsEmpty, + /// Returns iterator over specification of columns returned from the database, + /// ordered by column order in the response. + #[inline] + pub fn iter(&self) -> impl Iterator> { + self.specs.iter().map(ColumnSpecView::new_from_column_spec) + } } -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum FirstRowTypedError { - /// [`QueryResult::first_row_typed()`](QueryResult::first_row_typed) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). 
- #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Rows in `QueryResult` are empty - #[error("Rows in QueryResult are empty")] - RowsEmpty, - - /// Parsing row as the given type failed - #[error(transparent)] - FromRowError(#[from] FromRowError), +/// Result of a single request to the database. It represents any kind of Result frame. +/// +/// The received rows and metadata, which are present if the frame is of Result:Rows kind, +/// are kept in a raw binary form. To deserialize and access them, transform `QueryResult` +/// to [QueryRowsResult] by calling [QueryResult::into_rows_result]. +/// +/// NOTE: this is a result of a single CQL request. If you use paging for your query, +/// this will contain exactly one page. +#[derive(Debug)] +pub struct QueryResult { + raw_metadata_and_rows: Option, + tracing_id: Option, + warnings: Vec, } -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum MaybeFirstRowTypedError { - /// [`QueryResult::maybe_first_row_typed()`](QueryResult::maybe_first_row_typed) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`. - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Parsing row as the given type failed - #[error(transparent)] - FromRowError(#[from] FromRowError), -} +impl QueryResult { + pub(crate) fn new( + raw_rows: Option, + tracing_id: Option, + warnings: Vec, + ) -> Self { + Self { + raw_metadata_and_rows: raw_rows, + tracing_id, + warnings, + } + } -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum SingleRowError { - /// [`QueryResult::single_row()`](QueryResult::single_row) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). - #[error(transparent)] - RowsExpected(#[from] RowsExpectedError), - - /// Expected a single row, found other number of rows - #[error("Expected a single row, found {0} rows")] - BadNumberOfRows(usize), -} + // Preferred to implementing Default, because users shouldn't be able to create + // an empty QueryResult. + // + // For now unused, but it will be used once Session's API is migrated + // to the new QueryResult. + #[allow(dead_code)] + pub(crate) fn mock_empty() -> Self { + Self { + raw_metadata_and_rows: None, + tracing_id: None, + warnings: Vec::new(), + } + } -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum SingleRowTypedError { - /// [`QueryResult::single_row_typed()`](QueryResult::single_row_typed) called on a bad QueryResult.\ - /// Expected `QueryResult.rows` to be `Some`, but it was `None`.\ - /// `QueryResult.rows` is `Some` for queries that can return rows (e.g `SELECT`).\ - /// It is `None` for queries that can't return rows (e.g `INSERT`). 
-    #[error(transparent)]
-    RowsExpected(#[from] RowsExpectedError),
-
-    /// Expected a single row, found other number of rows
-    #[error("Expected a single row, found {0} rows")]
-    BadNumberOfRows(usize),
-
-    /// Parsing row as the given type failed
-    #[error(transparent)]
-    FromRowError(#[from] FromRowError),
-}
+    pub(crate) fn raw_metadata_and_rows(&self) -> Option<&RawMetadataAndRawRows> {
+        self.raw_metadata_and_rows.as_ref()
+    }

-impl From<FirstRowError> for FirstRowTypedError {
-    fn from(err: FirstRowError) -> FirstRowTypedError {
-        match err {
-            FirstRowError::RowsExpected(e) => FirstRowTypedError::RowsExpected(e),
-            FirstRowError::RowsEmpty => FirstRowTypedError::RowsEmpty,
-        }
+    /// Warnings emitted by the database.
+    #[inline]
+    pub fn warnings(&self) -> impl Iterator<Item = &str> {
+        self.warnings.iter().map(String::as_str)
     }
-}

-impl From<SingleRowError> for SingleRowTypedError {
-    fn from(err: SingleRowError) -> SingleRowTypedError {
-        match err {
-            SingleRowError::RowsExpected(e) => SingleRowTypedError::RowsExpected(e),
-            SingleRowError::BadNumberOfRows(r) => SingleRowTypedError::BadNumberOfRows(r),
-        }
+    /// Tracing ID associated with this CQL request.
+    #[inline]
+    pub fn tracing_id(&self) -> Option<Uuid> {
+        self.tracing_id
     }
-}

-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::{
-        frame::response::result::{CqlValue, Row},
-        test_utils::setup_tracing,
-    };
-    use std::convert::TryInto;
+    /// Returns a bool indicating whether this response is of Rows kind.
+    #[inline]
+    pub fn is_rows(&self) -> bool {
+        self.raw_metadata_and_rows.is_some()
+    }

-    use assert_matches::assert_matches;
-    use scylla_cql::frame::response::result::{ColumnType, TableSpec};
-
-    // Returns specified number of rows, each one containing one int32 value.
-    // Values are 0, 1, 2, 3, 4, ...
-    fn make_rows(rows_num: usize) -> Vec<Row> {
-        let mut rows: Vec<Row> = Vec::with_capacity(rows_num);
-        for cur_value in 0..rows_num {
-            let int_val: i32 = cur_value.try_into().unwrap();
-            rows.push(Row {
-                columns: vec![Some(CqlValue::Int(int_val))],
-            });
+    /// Returns `Ok` for a result of a request that shouldn't contain any rows.\
+    /// Will return `Ok` for an `INSERT` result, but a `SELECT` result, even an empty one, will cause an error.\
+    /// Opposite of [QueryResult::into_rows_result].
+    #[inline]
+    pub fn result_not_rows(&self) -> Result<(), ResultNotRowsError> {
+        match &self.raw_metadata_and_rows {
+            Some(_) => Err(ResultNotRowsError),
+            None => Ok(()),
         }
-        rows
     }
-
-    // Just like make_rows, but each column has one String value
-    // values are "val0", "val1", "val2", ...
-    fn make_string_rows(rows_num: usize) -> Vec<Row> {
-        let mut rows: Vec<Row> = Vec::with_capacity(rows_num);
-        for cur_value in 0..rows_num {
-            rows.push(Row {
-                columns: vec![Some(CqlValue::Text(format!("val{}", cur_value)))],
-            });
-        }
-        rows
-    }
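The non-rows accessors above compose naturally at a call site. A minimal sketch (the function and its inputs are illustrative, not part of the patch):

```rust
use scylla::transport::query_result::{QueryResult, ResultNotRowsError};

// Assumes `result` came from a statement that should not return rows
// (e.g. an INSERT). `is_rows()` is the boolean counterpart of the
// error-returning `result_not_rows()` used at the end.
fn inspect_non_rows_result(result: &QueryResult) -> Result<(), ResultNotRowsError> {
    for warning in result.warnings() {
        eprintln!("db warning: {warning}");
    }
    if let Some(id) = result.tracing_id() {
        eprintln!("tracing id: {id}");
    }
    result.result_not_rows()
}
```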
+    /// Transforms itself into the Rows result type to enable deserializing rows.
+    /// Deserializes result metadata and allocates it.
+    ///
+    /// Returns `None` if the response is not of Rows kind.
+    ///
+    /// ```rust
+    /// # use scylla::transport::query_result::{QueryResult, QueryRowsResult};
+    /// # fn example(query_result: QueryResult) -> Result<(), Box<dyn std::error::Error>> {
+    /// let maybe_rows_result = query_result.into_rows_result()?;
+    /// if let Some(rows_result) = maybe_rows_result {
+    ///     let mut rows_iter = rows_result.rows::<(i32, &str)>()?;
+    ///     while let Some((num, text)) = rows_iter.next().transpose()? {
+    ///         // do something with `num` and `text`
+    ///     }
+    /// } else {
+    ///     // Response was not Result:Rows, but some other kind of Result.
+    /// }
+    ///
+    /// Ok(())
+    /// # }
+    /// ```
+    pub fn into_rows_result(self) -> Result<Option<QueryRowsResult>, RowsParseError> {
+        let QueryResult {
+            raw_metadata_and_rows,
+            tracing_id,
+            warnings,
+        } = self;
+        raw_metadata_and_rows
+            .map(|raw_rows| {
+                let raw_rows_with_metadata = raw_rows.deserialize_metadata()?;
+                Ok(QueryRowsResult {
+                    raw_rows_with_metadata,
+                    warnings,
+                    tracing_id,
+                })
+            })
+            .transpose()
+    }
+
+    /// Transforms itself into the legacy result type, by eagerly deserializing rows
+    /// into the Row type. This is inefficient, and should only be used during the
+    /// transition period to the new API.
+    pub fn into_legacy_result(self) -> Result<LegacyQueryResult, RowsParseError> {
+        if let Some(raw_rows) = self.raw_metadata_and_rows {
+            let raw_rows_with_metadata = raw_rows.deserialize_metadata()?;
+
+            let deserialized_rows = raw_rows_with_metadata
+                .rows_iter::<Row>()?
+                .collect::<Result<Vec<Row>, DeserializationError>>()?;
+            let serialized_size = raw_rows_with_metadata.rows_bytes_size();
+            let metadata = raw_rows_with_metadata.into_metadata();
+
+            Ok(LegacyQueryResult {
+                rows: Some(deserialized_rows),
+                warnings: self.warnings,
+                tracing_id: self.tracing_id,
+                metadata: Some(metadata),
+                serialized_size,
+            })
+        } else {
+            Ok(LegacyQueryResult {
+                rows: None,
+                warnings: self.warnings,
+                tracing_id: self.tracing_id,
+                metadata: None,
+                serialized_size: 0,
+            })
+        }
+    }
+}

+/// Enables deserialization of rows received from the database in a [`QueryResult`].
+///
+/// Upon creation, it deserializes result metadata and allocates it.
+///
+/// This struct provides generic methods which enable typed access to the data,
+/// by deserializing rows on the fly to the type provided as a type parameter.
+/// Those methods are:
+/// - rows() - for iterating through rows,
+/// - first_row() and maybe_first_row() - for accessing the first row,
+/// - single_row() - for accessing the first row, additionally asserting
+///   that it's the only one in the response.
+///
+/// ```rust
+/// # use scylla::transport::query_result::QueryResult;
+/// # fn example(query_result: QueryResult) -> Result<(), Box<dyn std::error::Error>> {
+/// let maybe_rows_result = query_result.into_rows_result()?;
+/// if let Some(rows_result) = maybe_rows_result {
+///     let mut rows_iter = rows_result.rows::<(i32, &str)>()?;
+///     while let Some((num, text)) = rows_iter.next().transpose()? {
+///         // do something with `num` and `text`
+///     }
+/// } else {
+///     // Response was not Result:Rows, but some other kind of Result.
+/// }
+///
+/// Ok(())
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct QueryRowsResult {
+    raw_rows_with_metadata: DeserializedMetadataAndRawRows,
+    tracing_id: Option<Uuid>,
+    warnings: Vec<String>,
+}

-    fn make_test_metadata() -> ResultMetadata<'static> {
-        let table_spec = TableSpec::borrowed("some_keyspace", "some_table");
-
-        let column_spec = ColumnSpec::borrowed("column0", ColumnType::Int, table_spec);
-
-        ResultMetadata::new_for_test(1, vec![column_spec])
-    }

+impl QueryRowsResult {
+    /// Warnings emitted by the database.
+    #[inline]
+    pub fn warnings(&self) -> impl Iterator<Item = &str> {
+        self.warnings.iter().map(String::as_str)
+    }
+
+    /// Tracing ID associated with this CQL request.
+    #[inline]
+    pub fn tracing_id(&self) -> Option<Uuid> {
+        self.tracing_id
+    }
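For reviewers tracking the migration, a hypothetical call-site sketch contrasting the compatibility shim with the new lazy path; both branches read `(i32, String)` rows from the same `QueryResult`, and nothing here is part of the patch itself:

```rust
use scylla::transport::query_result::QueryResult;

fn migrate(result: QueryResult) -> Result<(), Box<dyn std::error::Error>> {
    // Old style, still available through `into_legacy_result` (eager, allocates
    // every row up front):
    // let legacy = result.into_legacy_result()?;
    // for row in legacy.rows_typed::<(i32, String)>()? { /* ... */ }

    // New style: metadata is deserialized once, rows are decoded on demand.
    if let Some(rows_result) = result.into_rows_result()? {
        for row in rows_result.rows::<(i32, String)>()? {
            let (id, name) = row?;
            println!("{id}: {name}");
        }
    }
    Ok(())
}
```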
+    /// Returns the number of received rows.
+    #[inline]
+    pub fn rows_num(&self) -> usize {
+        self.raw_rows_with_metadata.rows_count()
+    }

-    fn make_rows_query_result(rows_num: usize) -> QueryResult {
-        let mut res = make_not_rows_query_result();
-        res.rows = Some(make_rows(rows_num));
-        res.metadata = Some(Arc::new(make_test_metadata()));
-        res
+    /// Returns the size of the serialized rows.
+    #[inline]
+    pub fn rows_bytes_size(&self) -> usize {
+        self.raw_rows_with_metadata.rows_bytes_size()
     }

-    fn make_string_rows_query_result(rows_num: usize) -> QueryResult {
-        let mut res = make_not_rows_query_result();
-        res.rows = Some(make_string_rows(rows_num));
-        res.metadata = Some(Arc::new(make_test_metadata()));
-        res
+    /// Returns column specifications.
+    #[inline]
+    pub fn column_specs(&self) -> ColumnSpecs {
+        ColumnSpecs::new(self.raw_rows_with_metadata.metadata().col_specs())
     }
+}

-    #[test]
-    fn rows_num_test() {
-        setup_tracing();
-        assert_eq!(
-            make_not_rows_query_result().rows_num(),
-            Err(RowsExpectedError)
-        );
-        assert_eq!(make_rows_query_result(0).rows_num(), Ok(0));
-        assert_eq!(make_rows_query_result(1).rows_num(), Ok(1));
-        assert_eq!(make_rows_query_result(2).rows_num(), Ok(2));
-        assert_eq!(make_rows_query_result(3).rows_num(), Ok(3));
+impl QueryRowsResult {
+    /// Returns the received rows when present.
+    ///
+    /// Returns an error if the rows in the response are of incorrect type.
+    #[inline]
+    pub fn rows<'frame, R: DeserializeRow<'frame, 'frame>>(
+        &'frame self,
+    ) -> Result<TypedRowIterator<'frame, 'frame, R>, RowsError> {
+        self.raw_rows_with_metadata
+            .rows_iter()
+            .map_err(RowsError::TypeCheckFailed)
+    }
+
+    /// Returns `Option<R>` containing the first row of the result.
+    ///
+    /// Fails when the rows in the response are of incorrect type,
+    /// or when the deserialization fails.
+    pub fn maybe_first_row<'frame, R: DeserializeRow<'frame, 'frame>>(
+        &'frame self,
+    ) -> Result<Option<R>, MaybeFirstRowError> {
+        self.rows::<R>()
+            .map_err(|err| match err {
+                RowsError::TypeCheckFailed(typck_err) => {
+                    MaybeFirstRowError::TypeCheckFailed(typck_err)
+                }
+            })?
+            .next()
+            .transpose()
+            .map_err(MaybeFirstRowError::DeserializationFailed)
+    }
+
+    /// Returns the first row of the received rows.
+    ///
+    /// When the first row is not available, returns an error.
+    /// Fails when the rows in the response are of incorrect type,
+    /// or when the deserialization fails.
+    pub fn first_row<'frame, R: DeserializeRow<'frame, 'frame>>(
+        &'frame self,
+    ) -> Result<R, FirstRowError> {
+        match self.maybe_first_row::<R>() {
+            Ok(Some(row)) => Ok(row),
+            Ok(None) => Err(FirstRowError::RowsEmpty),
+            Err(MaybeFirstRowError::TypeCheckFailed(err)) => {
+                Err(FirstRowError::TypeCheckFailed(err))
+            }
+            Err(MaybeFirstRowError::DeserializationFailed(err)) => {
+                Err(FirstRowError::DeserializationFailed(err))
+            }
+        }
     }

-    #[test]
-    fn rows_test() {
-        setup_tracing();
-        assert_eq!(make_not_rows_query_result().rows(), Err(RowsExpectedError));
-        assert_eq!(make_rows_query_result(0).rows(), Ok(vec![]));
-        assert_eq!(make_rows_query_result(1).rows(), Ok(make_rows(1)));
-        assert_eq!(make_rows_query_result(2).rows(), Ok(make_rows(2)));
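The split between type-check and deserialization failures may be easier to see in calling code. A minimal sketch (the function, its inputs, and the `(a int, b text)` schema are illustrative only; the match arms mirror the error enums defined just below):

```rust
use scylla::transport::query_result::{MaybeFirstRowError, QueryRowsResult};

fn first_pair(rows_result: &QueryRowsResult) -> Option<(i32, &str)> {
    match rows_result.maybe_first_row::<(i32, &str)>() {
        // `None` here simply means the response contained zero rows.
        Ok(first) => first,
        Err(MaybeFirstRowError::TypeCheckFailed(e)) => {
            // Raised once per call, before any row is decoded: the requested
            // Rust type does not match the column types in the metadata.
            eprintln!("wrong target type: {e}");
            None
        }
        Err(MaybeFirstRowError::DeserializationFailed(e)) => {
            // Raised while decoding the first row's bytes.
            eprintln!("malformed row: {e}");
            None
        }
    }
}
```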
+    /// Returns the only received row.
+    ///
+    /// Fails if the result is anything other than a single row.
+    /// Fails when the rows in the response are of incorrect type,
+    /// or when the deserialization fails.
+    pub fn single_row<'frame, R: DeserializeRow<'frame, 'frame>>(
+        &'frame self,
+    ) -> Result<R, SingleRowError> {
+        match self.rows::<R>() {
+            Ok(mut rows) => match rows.next() {
+                Some(Ok(row)) => {
+                    if rows.rows_remaining() != 0 {
+                        return Err(SingleRowError::UnexpectedRowCount(
+                            rows.rows_remaining() + 1,
+                        ));
+                    }
+                    Ok(row)
+                }
+                Some(Err(err)) => Err(SingleRowError::DeserializationFailed(err)),
+                None => Err(SingleRowError::UnexpectedRowCount(0)),
+            },
+            Err(RowsError::TypeCheckFailed(err)) => Err(SingleRowError::TypeCheckFailed(err)),
+        }
+    }
+}

-    #[test]
-    fn rows_typed_test() {
-        setup_tracing();
-        assert!(make_not_rows_query_result().rows_typed::<(i32,)>().is_err());
+/// An error returned by [`QueryRowsResult::rows`].
+#[derive(Debug, Error)]
+pub enum RowsError {
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),
+}

-        let rows0: Vec<(i32,)> = make_rows_query_result(0)
-            .rows_typed::<(i32,)>()
-            .unwrap()
-            .map(|r| r.unwrap())
-            .collect();
+/// An error returned by [`QueryRowsResult::maybe_first_row`].
+#[derive(Debug, Error)]
+pub enum MaybeFirstRowError {
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),

-        assert_eq!(rows0, vec![]);
+    /// Deserialization failed
+    #[error("Deserialization failed: {0}")]
+    DeserializationFailed(#[from] DeserializationError),
+}

-        let rows1: Vec<(i32,)> = make_rows_query_result(1)
-            .rows_typed::<(i32,)>()
-            .unwrap()
-            .map(|r| r.unwrap())
-            .collect();
+/// An error returned by [`QueryRowsResult::first_row`].
+#[derive(Debug, Error)]
+pub enum FirstRowError {
+    /// The request response was of Rows type, but no rows were returned
+    #[error("The request response was of Rows type, but no rows were returned")]
+    RowsEmpty,

-        assert_eq!(rows1, vec![(0,)]);
+    /// Type check failed
+    #[error("Type check failed: {0}")]
+    TypeCheckFailed(#[from] TypeCheckError),

-        let rows2: Vec<(i32,)> = make_rows_query_result(2)
-            .rows_typed::<(i32,)>()
-            .unwrap()
-            .map(|r| r.unwrap())
-            .collect();
+    /// Deserialization failed
+    #[error("Deserialization failed: {0}")]
+    DeserializationFailed(#[from] DeserializationError),
+}

-        assert_eq!(rows2, vec![(0,), (1,)]);
-    }
+/// An error returned by [`QueryRowsResult::single_row`].
+#[derive(Debug, Error, Clone)] +pub enum SingleRowError { + /// Expected one row, but got a different count + #[error("Expected a single row, but got {0} rows")] + UnexpectedRowCount(usize), - #[test] - fn result_not_rows_test() { - setup_tracing(); - assert_eq!(make_not_rows_query_result().result_not_rows(), Ok(())); - assert_eq!( - make_rows_query_result(0).result_not_rows(), - Err(RowsNotExpectedError) - ); - assert_eq!( - make_rows_query_result(1).result_not_rows(), - Err(RowsNotExpectedError) - ); - assert_eq!( - make_rows_query_result(2).result_not_rows(), - Err(RowsNotExpectedError) - ); - } + /// Type check failed + #[error("Type check failed: {0}")] + TypeCheckFailed(#[from] TypeCheckError), - #[test] - fn rows_or_empty_test() { - setup_tracing(); - assert_eq!(make_not_rows_query_result().rows_or_empty(), vec![]); - assert_eq!(make_rows_query_result(0).rows_or_empty(), make_rows(0)); - assert_eq!(make_rows_query_result(1).rows_or_empty(), make_rows(1)); - assert_eq!(make_rows_query_result(2).rows_or_empty(), make_rows(2)); - } + /// Deserialization failed + #[error("Deserialization failed: {0}")] + DeserializationFailed(#[from] DeserializationError), +} - #[test] - fn rows_typed_or_empty() { - setup_tracing(); - let rows_empty: Vec<(i32,)> = make_not_rows_query_result() - .rows_typed_or_empty::<(i32,)>() - .map(|r| r.unwrap()) - .collect(); +/// An error returned by [`QueryResult::result_not_rows`]. +/// +/// It indicates that response to the request was, unexpectedly, of Rows kind. +#[derive(Debug, Error)] +#[error("The request response was, unexpectedly, of Rows kind")] +pub struct ResultNotRowsError; - assert_eq!(rows_empty, vec![]); +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use bytes::{Bytes, BytesMut}; + use itertools::Itertools as _; + use scylla_cql::frame::response::result::ResultMetadata; + use scylla_cql::frame::types; - let rows0: Vec<(i32,)> = make_rows_query_result(0) - .rows_typed_or_empty::<(i32,)>() - .map(|r| r.unwrap()) - .collect(); + use super::*; - assert_eq!(rows0, vec![]); + const TABLE_SPEC: TableSpec<'static> = TableSpec::borrowed("ks", "tbl"); - let rows1: Vec<(i32,)> = make_rows_query_result(1) - .rows_typed_or_empty::<(i32,)>() - .map(|r| r.unwrap()) - .collect(); + fn column_spec_infinite_iter() -> impl Iterator> { + (0..).map(|k| { + ColumnSpec::owned( + format!("col_{}", k), + match k % 3 { + 0 => ColumnType::Ascii, + 1 => ColumnType::Boolean, + 2 => ColumnType::Float, + _ => unreachable!(), + }, + TABLE_SPEC, + ) + }) + } - assert_eq!(rows1, vec![(0,)]); + #[test] + fn test_query_result() { + fn serialize_cells(cells: impl IntoIterator>>) -> Bytes { + let mut bytes = BytesMut::new(); + for cell in cells { + types::write_bytes_opt(cell, &mut bytes).unwrap(); + } + bytes.freeze() + } - let rows2: Vec<(i32,)> = make_rows_query_result(2) - .rows_typed_or_empty::<(i32,)>() - .map(|r| r.unwrap()) - .collect(); + fn sample_result_metadata(cols: usize) -> ResultMetadata<'static> { + ResultMetadata::new_for_test(cols, column_spec_infinite_iter().take(cols).collect()) + } - assert_eq!(rows2, vec![(0,), (1,)]); - } + fn sample_raw_rows(cols: usize, rows: usize) -> RawMetadataAndRawRows { + let metadata = sample_result_metadata(cols); + + static STRING: &[u8] = "MOCK".as_bytes(); + static BOOLEAN: &[u8] = &(true as i8).to_be_bytes(); + static FLOAT: &[u8] = &12341_i32.to_be_bytes(); + let cells = metadata.col_specs().iter().map(|spec| match spec.typ() { + ColumnType::Ascii => STRING, + ColumnType::Boolean => BOOLEAN, + 
ColumnType::Float => FLOAT, + _ => unreachable!(), + }); + let bytes = serialize_cells(cells.map(Some)); + RawMetadataAndRawRows::new_for_test(None, Some(metadata), false, rows, &bytes).unwrap() + } - #[test] - fn first_row_test() { - setup_tracing(); - assert_eq!( - make_not_rows_query_result().first_row(), - Err(FirstRowError::RowsExpected(RowsExpectedError)) - ); - assert_eq!( - make_rows_query_result(0).first_row(), - Err(FirstRowError::RowsEmpty) - ); - assert_eq!( - make_rows_query_result(1).first_row(), - Ok(make_rows(1).into_iter().next().unwrap()) - ); - assert_eq!( - make_rows_query_result(2).first_row(), - Ok(make_rows(2).into_iter().next().unwrap()) - ); - assert_eq!( - make_rows_query_result(3).first_row(), - Ok(make_rows(3).into_iter().next().unwrap()) - ); - } + // Used to trigger DeserializationError. + fn sample_raw_rows_invalid_bytes(cols: usize, rows: usize) -> RawMetadataAndRawRows { + let metadata = sample_result_metadata(cols); - #[test] - fn first_row_typed_test() { - setup_tracing(); - assert_eq!( - make_not_rows_query_result().first_row_typed::<(i32,)>(), - Err(FirstRowTypedError::RowsExpected(RowsExpectedError)) - ); - assert_eq!( - make_rows_query_result(0).first_row_typed::<(i32,)>(), - Err(FirstRowTypedError::RowsEmpty) - ); - assert_eq!( - make_rows_query_result(1).first_row_typed::<(i32,)>(), - Ok((0,)) - ); - assert_eq!( - make_rows_query_result(2).first_row_typed::<(i32,)>(), - Ok((0,)) - ); - assert_eq!( - make_rows_query_result(3).first_row_typed::<(i32,)>(), - Ok((0,)) - ); - - assert_matches!( - make_string_rows_query_result(2).first_row_typed::<(i32,)>(), - Err(FirstRowTypedError::FromRowError(_)) - ); - } + RawMetadataAndRawRows::new_for_test(None, Some(metadata), false, rows, &[]).unwrap() + } - #[test] - fn maybe_first_row_test() { - setup_tracing(); - assert_eq!( - make_not_rows_query_result().maybe_first_row(), - Err(RowsExpectedError) - ); - assert_eq!(make_rows_query_result(0).maybe_first_row(), Ok(None)); - assert_eq!( - make_rows_query_result(1).maybe_first_row(), - Ok(Some(make_rows(1).into_iter().next().unwrap())) - ); - assert_eq!( - make_rows_query_result(2).maybe_first_row(), - Ok(Some(make_rows(2).into_iter().next().unwrap())) - ); - assert_eq!( - make_rows_query_result(3).maybe_first_row(), - Ok(Some(make_rows(3).into_iter().next().unwrap())) - ); - } + // Check tracing ID + for tracing_id in [None, Some(Uuid::from_u128(0x_feed_dead))] { + for raw_rows in [None, Some(sample_raw_rows(7, 6))] { + let qr = QueryResult::new(raw_rows, tracing_id, vec![]); + assert_eq!(qr.tracing_id(), tracing_id); + } + } - #[test] - fn maybe_first_row_typed_test() { - setup_tracing(); - assert_eq!( - make_not_rows_query_result().maybe_first_row_typed::<(i32,)>(), - Err(MaybeFirstRowTypedError::RowsExpected(RowsExpectedError)) - ); - - assert_eq!( - make_rows_query_result(0).maybe_first_row_typed::<(i32,)>(), - Ok(None) - ); - - assert_eq!( - make_rows_query_result(1).maybe_first_row_typed::<(i32,)>(), - Ok(Some((0,))) - ); - - assert_eq!( - make_rows_query_result(2).maybe_first_row_typed::<(i32,)>(), - Ok(Some((0,))) - ); - - assert_eq!( - make_rows_query_result(3).maybe_first_row_typed::<(i32,)>(), - Ok(Some((0,))) - ); - - assert_matches!( - make_string_rows_query_result(1).maybe_first_row_typed::<(i32,)>(), - Err(MaybeFirstRowTypedError::FromRowError(_)) - ) - } + // Check warnings + for raw_rows in [None, Some(sample_raw_rows(7, 6))] { + let warnings = &["Ooops", "Meltdown..."]; + let qr = QueryResult::new( + raw_rows, + None, + 
warnings.iter().copied().map(String::from).collect(), + ); + assert_eq!(qr.warnings().collect_vec(), warnings); + } - #[test] - fn single_row_test() { - setup_tracing(); - assert_eq!( - make_not_rows_query_result().single_row(), - Err(SingleRowError::RowsExpected(RowsExpectedError)) - ); - assert_eq!( - make_rows_query_result(0).single_row(), - Err(SingleRowError::BadNumberOfRows(0)) - ); - assert_eq!( - make_rows_query_result(1).single_row(), - Ok(make_rows(1).into_iter().next().unwrap()) - ); - assert_eq!( - make_rows_query_result(2).single_row(), - Err(SingleRowError::BadNumberOfRows(2)) - ); - assert_eq!( - make_rows_query_result(3).single_row(), - Err(SingleRowError::BadNumberOfRows(3)) - ); - } + // Check col specs + { + // Not RESULT::Rows response -> no column specs + { + let rqr = QueryResult::new(None, None, Vec::new()); + let qr = rqr.into_rows_result().unwrap(); + assert_matches!(qr, None); + } + + // RESULT::Rows response -> some column specs + { + let n = 5; + let metadata = sample_result_metadata(n); + let rr = RawMetadataAndRawRows::new_for_test(None, Some(metadata), false, 0, &[]) + .unwrap(); + let rqr = QueryResult::new(Some(rr), None, Vec::new()); + let qr = rqr.into_rows_result().unwrap().unwrap(); + let column_specs = qr.column_specs(); + assert_eq!(column_specs.len(), n); + + // By index + { + for (i, expected_col_spec) in column_spec_infinite_iter().enumerate().take(n) { + let expected_view = + ColumnSpecView::new_from_column_spec(&expected_col_spec); + assert_eq!(column_specs.get_by_index(i), Some(expected_view)); + } + + assert_matches!(column_specs.get_by_index(n), None); + } + + // By name + { + for (idx, expected_col_spec) in column_spec_infinite_iter().enumerate().take(n) + { + let name = expected_col_spec.name(); + let expected_view = + ColumnSpecView::new_from_column_spec(&expected_col_spec); + assert_eq!(column_specs.get_by_name(name), Some((idx, expected_view))); + } + + assert_matches!(column_specs.get_by_name("ala ma kota"), None); + } + + // By iter + { + for (got_view, expected_col_spec) in + column_specs.iter().zip(column_spec_infinite_iter()) + { + let expected_view = + ColumnSpecView::new_from_column_spec(&expected_col_spec); + assert_eq!(got_view, expected_view); + } + } + } + } - #[test] - fn single_row_typed_test() { - setup_tracing(); - assert_eq!( - make_not_rows_query_result().single_row_typed::<(i32,)>(), - Err(SingleRowTypedError::RowsExpected(RowsExpectedError)) - ); - assert_eq!( - make_rows_query_result(0).single_row_typed::<(i32,)>(), - Err(SingleRowTypedError::BadNumberOfRows(0)) - ); - assert_eq!( - make_rows_query_result(1).single_row_typed::<(i32,)>(), - Ok((0,)) - ); - assert_eq!( - make_rows_query_result(2).single_row_typed::<(i32,)>(), - Err(SingleRowTypedError::BadNumberOfRows(2)) - ); - assert_eq!( - make_rows_query_result(3).single_row_typed::<(i32,)>(), - Err(SingleRowTypedError::BadNumberOfRows(3)) - ); - - assert_matches!( - make_string_rows_query_result(1).single_row_typed::<(i32,)>(), - Err(SingleRowTypedError::FromRowError(_)) - ); + // rows(), maybe_rows(), result_not_rows(), first_row(), maybe_first_row(), single_row() + // All errors are checked. 
+ { + // Not RESULT::Rows + { + let rqr = QueryResult::new(None, None, Vec::new()); + let qr = rqr.into_rows_result().unwrap(); + assert_matches!(qr, None); + } + + // RESULT::Rows with 0 rows + { + let rr = sample_raw_rows(1, 0); + let rqr = QueryResult::new(Some(rr), None, Vec::new()); + assert_matches!(rqr.result_not_rows(), Err(ResultNotRowsError)); + + let qr = rqr.into_rows_result().unwrap().unwrap(); + + // Type check error + { + assert_matches!(qr.rows::<(i32,)>(), Err(RowsError::TypeCheckFailed(_))); + + assert_matches!( + qr.first_row::<(i32,)>(), + Err(FirstRowError::TypeCheckFailed(_)) + ); + assert_matches!( + qr.maybe_first_row::<(i32,)>(), + Err(MaybeFirstRowError::TypeCheckFailed(_)) + ); + + assert_matches!( + qr.single_row::<(i32,)>(), + Err(SingleRowError::TypeCheckFailed(_)) + ); + } + + // Correct type + { + assert_matches!(qr.rows::<(&str,)>(), Ok(_)); + + assert_matches!(qr.first_row::<(&str,)>(), Err(FirstRowError::RowsEmpty)); + assert_matches!(qr.maybe_first_row::<(&str,)>(), Ok(None)); + + assert_matches!( + qr.single_row::<(&str,)>(), + Err(SingleRowError::UnexpectedRowCount(0)) + ); + } + } + + // RESULT::Rows with 1 row + { + let rr_good_data = sample_raw_rows(2, 1); + let rr_bad_data = sample_raw_rows_invalid_bytes(2, 1); + let rqr_good_data = QueryResult::new(Some(rr_good_data), None, Vec::new()); + let rqr_bad_data = QueryResult::new(Some(rr_bad_data), None, Vec::new()); + + for rqr in [&rqr_good_data, &rqr_bad_data] { + assert_matches!(rqr.result_not_rows(), Err(ResultNotRowsError)); + } + + let qr_good_data = rqr_good_data.into_rows_result().unwrap().unwrap(); + let qr_bad_data = rqr_bad_data.into_rows_result().unwrap().unwrap(); + + for qr in [&qr_good_data, &qr_bad_data] { + // Type check error + { + assert_matches!( + qr.rows::<(i32, i32)>(), + Err(RowsError::TypeCheckFailed(_)) + ); + + assert_matches!( + qr.first_row::<(i32, i32)>(), + Err(FirstRowError::TypeCheckFailed(_)) + ); + assert_matches!( + qr.maybe_first_row::<(i32, i32)>(), + Err(MaybeFirstRowError::TypeCheckFailed(_)) + ); + + assert_matches!( + qr.single_row::<(i32, i32)>(), + Err(SingleRowError::TypeCheckFailed(_)) + ); + } + } + + // Correct type + { + assert_matches!(qr_good_data.rows::<(&str, bool)>(), Ok(_)); + assert_matches!(qr_bad_data.rows::<(&str, bool)>(), Ok(_)); + + assert_matches!(qr_good_data.first_row::<(&str, bool)>(), Ok(_)); + assert_matches!( + qr_bad_data.first_row::<(&str, bool)>(), + Err(FirstRowError::DeserializationFailed(_)) + ); + assert_matches!(qr_good_data.maybe_first_row::<(&str, bool)>(), Ok(_)); + assert_matches!( + qr_bad_data.maybe_first_row::<(&str, bool)>(), + Err(MaybeFirstRowError::DeserializationFailed(_)) + ); + + assert_matches!(qr_good_data.single_row::<(&str, bool)>(), Ok(_)); + assert_matches!( + qr_bad_data.single_row::<(&str, bool)>(), + Err(SingleRowError::DeserializationFailed(_)) + ); + } + } + + // RESULT::Rows with 2 rows + { + let rr = sample_raw_rows(2, 2); + let rqr = QueryResult::new(Some(rr), None, Vec::new()); + assert_matches!(rqr.result_not_rows(), Err(ResultNotRowsError)); + + let qr = rqr.into_rows_result().unwrap().unwrap(); + + // Type check error + { + assert_matches!(qr.rows::<(i32, i32)>(), Err(RowsError::TypeCheckFailed(_))); + + assert_matches!( + qr.first_row::<(i32, i32)>(), + Err(FirstRowError::TypeCheckFailed(_)) + ); + assert_matches!( + qr.maybe_first_row::<(i32, i32)>(), + Err(MaybeFirstRowError::TypeCheckFailed(_)) + ); + + assert_matches!( + qr.single_row::<(i32, i32)>(), + 
Err(SingleRowError::TypeCheckFailed(_)) + ); + } + + // Correct type + { + assert_matches!(qr.rows::<(&str, bool)>(), Ok(_)); + + assert_matches!(qr.first_row::<(&str, bool)>(), Ok(_)); + assert_matches!(qr.maybe_first_row::<(&str, bool)>(), Ok(_)); + + assert_matches!( + qr.single_row::<(&str, bool)>(), + Err(SingleRowError::UnexpectedRowCount(2)) + ); + } + } + } } } diff --git a/scylla/src/transport/session.rs b/scylla/src/transport/session.rs index 42933bdaf..1defa514b 100644 --- a/scylla/src/transport/session.rs +++ b/scylla/src/transport/session.rs @@ -4,6 +4,7 @@ use crate::batch::batch_values; #[cfg(feature = "cloud")] use crate::cloud::CloudConfig; +use crate::LegacyQueryResult; use crate::history; use crate::history::HistoryListener; @@ -17,7 +18,8 @@ use async_trait::async_trait; use futures::future::join_all; use futures::future::try_join_all; use itertools::{Either, Itertools}; -use scylla_cql::frame::response::result::{deser_cql_value, ColumnSpec, Rows}; +use scylla_cql::frame::response::result::RawMetadataAndRawRows; +use scylla_cql::frame::response::result::{deser_cql_value, ColumnSpec}; use scylla_cql::frame::response::NonErrorResponse; use scylla_cql::types::serialize::batch::BatchValues; use scylla_cql::types::serialize::row::{SerializeRow, SerializedValues}; @@ -42,15 +44,14 @@ use super::connection::QueryResponse; use super::connection::SslConfig; use super::errors::TracingProtocolError; use super::execution_profile::{ExecutionProfile, ExecutionProfileHandle, ExecutionProfileInner}; +use super::iterator::QueryPager; +use super::legacy_query_result::MaybeFirstRowTypedError; #[cfg(feature = "cloud")] use super::node::CloudEndpoint; use super::node::{InternalKnownNode, KnownNode}; use super::partitioner::PartitionerName; -use super::query_result::MaybeFirstRowTypedError; use super::topology::UntranslatedPeer; use super::{NodeRef, SelfIdentity}; -use crate::cql_to_rust::FromRow; -use crate::frame::response::cql_to_rust::FromRowError; use crate::frame::response::result; use crate::prepared_statement::PreparedStatement; use crate::query::Query; @@ -61,7 +62,7 @@ use crate::transport::cluster::{Cluster, ClusterData, ClusterNeatDebug}; use crate::transport::connection::{Connection, ConnectionConfig, VerifiedKeyspaceName}; use crate::transport::connection_pool::PoolConfig; use crate::transport::host_filter::HostFilter; -use crate::transport::iterator::{PreparedIteratorConfig, RowIterator}; +use crate::transport::iterator::{LegacyRowIterator, PreparedIteratorConfig}; use crate::transport::load_balancing::{self, RoutingInfo}; use crate::transport::metrics::Metrics; use crate::transport::node::Node; @@ -76,6 +77,10 @@ use crate::{ pub use crate::transport::connection_pool::PoolSize; +// This re-export is to preserve backward compatibility. +// Those items are no longer here not to clutter session.rs with legacy things. 
+pub use crate::transport::legacy_query_result::{IntoTypedRows, TypedRowIter}; + use crate::authentication::AuthenticatorProvider; #[cfg(feature = "ssl")] use openssl::ssl::SslContext; @@ -423,38 +428,6 @@ impl Default for SessionConfig { } } -/// Trait used to implement `Vec::into_typed` -// This is the only way to add custom method to Vec -pub trait IntoTypedRows { - fn into_typed(self) -> TypedRowIter; -} - -// Adds method Vec::into_typed(self) -// It transforms the Vec into iterator mapping to custom row type -impl IntoTypedRows for Vec { - fn into_typed(self) -> TypedRowIter { - TypedRowIter { - row_iter: self.into_iter(), - phantom_data: Default::default(), - } - } -} - -/// Iterator over rows parsed as the given type\ -/// Returned by `rows.into_typed::<(...)>()` -pub struct TypedRowIter { - row_iter: std::vec::IntoIter, - phantom_data: std::marker::PhantomData, -} - -impl Iterator for TypedRowIter { - type Item = Result; - - fn next(&mut self) -> Option { - self.row_iter.next().map(RowT::from_row) - } -} - pub(crate) enum RunQueryResult { IgnoredWriteError, Completed(ResT), @@ -646,7 +619,7 @@ impl Session { &self, query: impl Into, values: impl SerializeRow, - ) -> Result { + ) -> Result { let query = query.into(); let (result, paging_state_response) = self .query(&query, values, None, PagingState::start()) @@ -711,7 +684,7 @@ impl Session { query: impl Into, values: impl SerializeRow, paging_state: PagingState, - ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { let query = query.into(); self.query( &query, @@ -739,7 +712,7 @@ impl Session { values: impl SerializeRow, page_size: Option, paging_state: PagingState, - ) -> Result<(QueryResult, PagingStateResponse), QueryError> { + ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> { let execution_profile = query .get_execution_profile_handle() .unwrap_or_else(|| self.get_default_execution_profile_handle()) @@ -827,6 +800,7 @@ impl Session { let (result, paging_state) = response.into_query_result_and_paging_state()?; span.record_result_fields(&result); + let result = result.into_legacy_result()?; Ok((result, paging_state)) } @@ -906,7 +880,7 @@ impl Session { &self, query: impl Into, values: impl SerializeRow, - ) -> Result { + ) -> Result { let query: Query = query.into(); let execution_profile = query @@ -915,20 +889,21 @@ impl Session { .access(); if values.is_empty() { - RowIterator::new_for_query( + QueryPager::new_for_query( query, execution_profile, self.cluster.get_data(), self.metrics.clone(), ) .await + .map(QueryPager::into_legacy) } else { - // Making RowIterator::new_for_query work with values is too hard (if even possible) + // Making QueryPager::new_for_query work with values is too hard (if even possible) // so instead of sending one prepare to a specific connection on each iterator query, // we fully prepare a statement beforehand. 
             let prepared = self.prepare(query).await?;
             let values = prepared.serialize_values(&values)?;
-            RowIterator::new_for_prepared_statement(PreparedIteratorConfig {
+            QueryPager::new_for_prepared_statement(PreparedIteratorConfig {
                 prepared,
                 values,
                 execution_profile,
@@ -936,6 +911,7 @@
                 metrics: self.metrics.clone(),
             })
             .await
+            .map(QueryPager::into_legacy)
         }
     }
 
@@ -1078,7 +1054,7 @@
         &self,
         prepared: &PreparedStatement,
         values: impl SerializeRow,
-    ) -> Result<QueryResult, QueryError> {
+    ) -> Result<LegacyQueryResult, QueryError> {
         let serialized_values = prepared.serialize_values(&values)?;
         let (result, paging_state) = self
             .execute(prepared, &serialized_values, None, PagingState::start())
@@ -1148,7 +1124,7 @@
         prepared: &PreparedStatement,
         values: impl SerializeRow,
         paging_state: PagingState,
-    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
         let serialized_values = prepared.serialize_values(&values)?;
         let page_size = prepared.get_validated_page_size();
 
         self.execute(prepared, &serialized_values, Some(page_size), paging_state)
@@ -1171,7 +1147,7 @@
         serialized_values: &SerializedValues,
         page_size: Option<PageSize>,
         paging_state: PagingState,
-    ) -> Result<(QueryResult, PagingStateResponse), QueryError> {
+    ) -> Result<(LegacyQueryResult, PagingStateResponse), QueryError> {
         let values_ref = &serialized_values;
         let paging_state_ref = &paging_state;
@@ -1262,6 +1238,7 @@
         let (result, paging_state) = response.into_query_result_and_paging_state()?;
         span.record_result_fields(&result);
+        let result = result.into_legacy_result()?;
 
         Ok((result, paging_state))
     }
@@ -1309,7 +1286,7 @@
         &self,
         prepared: impl Into<PreparedStatement>,
         values: impl SerializeRow,
-    ) -> Result<RowIterator, QueryError> {
+    ) -> Result<LegacyRowIterator, QueryError> {
         let prepared = prepared.into();
         let serialized_values = prepared.serialize_values(&values)?;
@@ -1318,7 +1295,7 @@
             .unwrap_or_else(|| self.get_default_execution_profile_handle())
             .access();
 
-        RowIterator::new_for_prepared_statement(PreparedIteratorConfig {
+        QueryPager::new_for_prepared_statement(PreparedIteratorConfig {
             prepared,
             values: serialized_values,
             execution_profile,
@@ -1326,6 +1303,7 @@
             metrics: self.metrics.clone(),
         })
         .await
+        .map(QueryPager::into_legacy)
     }
 
     /// Perform a batch request.\
@@ -1377,7 +1355,7 @@
         &self,
         batch: &Batch,
         values: impl BatchValues,
-    ) -> Result<QueryResult, QueryError> {
+    ) -> Result<LegacyQueryResult, QueryError> {
         // Shard-awareness behavior for batch will be to pick shard based on first batch statement's shard
         // If users batch statements by shard, they will be rewarded with full shard awareness
@@ -1454,10 +1432,13 @@
             .await?;
 
         let result = match run_query_result {
-            RunQueryResult::IgnoredWriteError => QueryResult::mock_empty(),
-            RunQueryResult::Completed(response) => response,
+            RunQueryResult::IgnoredWriteError => LegacyQueryResult::mock_empty(),
+            RunQueryResult::Completed(result) => {
+                span.record_result_fields(&result);
+                result.into_legacy_result()?
+            }
         };
-        span.record_result_fields(&result);
+
 
         Ok(result)
     }
@@ -2176,16 +2157,15 @@
         }
     }
 
-    pub(crate) fn record_result_fields(&self, result: &QueryResult) {
-        self.span.record("result_size", result.serialized_size);
-        if let Some(rows) = result.rows.as_ref() {
-            self.span.record("result_rows", rows.len());
-        }
+    pub(crate) fn record_raw_rows_fields(&self, raw_rows: &RawMetadataAndRawRows) {
+        self.span
+            .record("raw_result_size", raw_rows.metadata_and_rows_bytes_size());
     }
 
-    pub(crate) fn record_rows_fields(&self, rows: &Rows) {
-        self.span.record("result_size", rows.serialized_size);
-        self.span.record("result_rows", rows.rows.len());
+    pub(crate) fn record_result_fields(&self, query_result: &QueryResult) {
+        if let Some(raw_metadata_and_rows) = query_result.raw_metadata_and_rows() {
+            self.record_raw_rows_fields(raw_metadata_and_rows);
+        }
     }
 
     pub(crate) fn record_replicas<'a>(&'a self, replicas: &'a [(impl Borrow<Arc<Node>>, Shard)]) {
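
Taken together, the session.rs hunks above only rename the returned types; call shapes stay the same. Below is a minimal caller-side sketch of code that should keep compiling after this change. It is hedged: the connected Session and a `ks.tab(a text, b boolean)` table are assumed for illustration, and `IntoTypedRows` is imported through the backward-compatibility re-export introduced above.

use scylla::transport::session::IntoTypedRows;
use scylla::{LegacyQueryResult, Session};

async fn read_typed(session: &Session) -> Result<(), Box<dyn std::error::Error>> {
    // Same call as before the rename; only the annotated result type changes.
    let result: LegacyQueryResult = session
        .query_unpaged("SELECT a, b FROM ks.tab", &[])
        .await?;
    // `rows` is still an Option<Vec<Row>>, and `into_typed` still comes from
    // the IntoTypedRows trait, now re-exported from its old path.
    for row in result.rows.unwrap_or_default().into_typed::<(String, bool)>() {
        let (a, b) = row?;
        println!("a = {a}, b = {b}");
    }
    Ok(())
}
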
diff --git a/scylla/src/transport/session_test.rs b/scylla/src/transport/session_test.rs
index bfaed4d51..d4222d3b5 100644
--- a/scylla/src/transport/session_test.rs
+++ b/scylla/src/transport/session_test.rs
@@ -21,7 +21,7 @@ use crate::utils::test_utils::{
 };
 use crate::CachingSession;
 use crate::ExecutionProfile;
-use crate::QueryResult;
+use crate::LegacyQueryResult;
 use crate::{Session, SessionBuilder};
 use assert_matches::assert_matches;
 use futures::{FutureExt, StreamExt, TryStreamExt};
@@ -959,7 +959,7 @@ async fn test_tracing() {
 async fn test_tracing_query(session: &Session, ks: String) {
     // A query without tracing enabled has no tracing uuid in result
     let untraced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
-    let untraced_query_result: QueryResult =
+    let untraced_query_result: LegacyQueryResult =
         session.query_unpaged(untraced_query, &[]).await.unwrap();
 
     assert!(untraced_query_result.tracing_id.is_none());
@@ -968,7 +968,8 @@ async fn test_tracing_query(session: &Session, ks: String) {
     let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
     traced_query.config.tracing = true;
 
-    let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap();
+    let traced_query_result: LegacyQueryResult =
+        session.query_unpaged(traced_query, &[]).await.unwrap();
     assert!(traced_query_result.tracing_id.is_some());
 
     // Querying this uuid from tracing table gives some results
@@ -982,7 +983,7 @@ async fn test_tracing_execute(session: &Session, ks: String) {
         .await
         .unwrap();
 
-    let untraced_prepared_result: QueryResult = session
+    let untraced_prepared_result: LegacyQueryResult = session
        .execute_unpaged(&untraced_prepared, &[])
        .await
        .unwrap();
@@ -997,7 +998,7 @@
 
     traced_prepared.config.tracing = true;
 
-    let traced_prepared_result: QueryResult = session
+    let traced_prepared_result: LegacyQueryResult = session
        .execute_unpaged(&traced_prepared, &[])
        .await
        .unwrap();
@@ -1034,7 +1035,8 @@ async fn test_get_tracing_info(session: &Session, ks: String) {
     let mut traced_query: Query = Query::new(format!("SELECT * FROM {}.tab", ks));
     traced_query.config.tracing = true;
 
-    let traced_query_result: QueryResult = session.query_unpaged(traced_query, &[]).await.unwrap();
+    let traced_query_result: LegacyQueryResult =
+        session.query_unpaged(traced_query, &[]).await.unwrap();
     let tracing_id: Uuid = traced_query_result.tracing_id.unwrap();
 
     // Getting tracing info from session using this uuid works
@@ -1124,7 +1126,8 @@ async fn test_tracing_batch(session: &Session, ks: String) {
     let mut untraced_batch: Batch = Default::default();
     untraced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]);
 
-    let untraced_batch_result: QueryResult = session.batch(&untraced_batch, ((),)).await.unwrap();
+    let untraced_batch_result: LegacyQueryResult =
+        session.batch(&untraced_batch, ((),)).await.unwrap();
 
     assert!(untraced_batch_result.tracing_id.is_none());
 
     // Batch with tracing enabled has a tracing uuid in result
@@ -1132,7 +1135,7 @@
     traced_batch.append_statement(&format!("INSERT INTO {}.tab (a) VALUES('a')", ks)[..]);
     traced_batch.config.tracing = true;
 
-    let traced_batch_result: QueryResult = session.batch(&traced_batch, ((),)).await.unwrap();
+    let traced_batch_result: LegacyQueryResult = session.batch(&traced_batch, ((),)).await.unwrap();
     assert!(traced_batch_result.tracing_id.is_some());
 
     assert_in_tracing_table(session, traced_batch_result.tracing_id.unwrap()).await;
@@ -2567,7 +2570,7 @@
     batch.append_statement("INSERT INTO tab (p1, c1, r1, r2) VALUES (0, 123, 321, 312)");
     batch.append_statement("UPDATE tab SET r1 = 1 WHERE p1 = 0 AND c1 = 0 IF r2 = 0");
 
-    let batch_res: QueryResult = session.batch(&batch, ((), (), ())).await.unwrap();
+    let batch_res: LegacyQueryResult = session.batch(&batch, ((), (), ())).await.unwrap();
 
     // Scylla returns 5 columns, but Cassandra returns only 1
     let is_scylla: bool = batch_res.col_specs().len() == 5;
@@ -2579,7 +2582,11 @@
     }
 }
 
-async fn test_batch_lwts_for_scylla(session: &Session, batch: &Batch, batch_res: QueryResult) {
+async fn test_batch_lwts_for_scylla(
+    session: &Session,
+    batch: &Batch,
+    batch_res: LegacyQueryResult,
+) {
     // Alias required by clippy
     type IntOrNull = Option<i32>;
 
@@ -2600,7 +2607,7 @@ async fn test_batch_lwts_for_scylla(session: &Session, batch: &Batch, batch_res:
     assert_eq!(batch_res_rows, expected_batch_res_rows);
 
     let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap();
-    let prepared_batch_res: QueryResult =
+    let prepared_batch_res: LegacyQueryResult =
         session.batch(&prepared_batch, ((), (), ())).await.unwrap();
 
     let prepared_batch_res_rows: Vec<(bool, IntOrNull, IntOrNull, IntOrNull, IntOrNull)> =
@@ -2619,7 +2626,11 @@ async fn test_batch_lwts_for_scylla(session: &Session, batch: &Batch, batch_res:
     assert_eq!(prepared_batch_res_rows, expected_prepared_batch_res_rows);
 }
 
-async fn test_batch_lwts_for_cassandra(session: &Session, batch: &Batch, batch_res: QueryResult) {
+async fn test_batch_lwts_for_cassandra(
+    session: &Session,
+    batch: &Batch,
+    batch_res: LegacyQueryResult,
+) {
     // Alias required by clippy
     type IntOrNull = Option<i32>;
 
@@ -2636,7 +2647,7 @@ async fn test_batch_lwts_for_cassandra(session: &Session, batch: &Batch, batch_r:
     assert_eq!(batch_res_rows, expected_batch_res_rows);
 
     let prepared_batch: Batch = session.prepare_batch(batch).await.unwrap();
-    let prepared_batch_res: QueryResult =
+    let prepared_batch_res: LegacyQueryResult =
         session.batch(&prepared_batch, ((), (), ())).await.unwrap();
 
     // Returned columns are:
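
The test changes above are annotation-only: tracing behaves as before, only the result type is renamed. For reference, a hedged sketch of the same tracing pattern outside the test harness (the keyspace and table are hypothetical; `set_tracing` is the statement-level setter this driver already exposes):

use scylla::statement::query::Query;
use scylla::{LegacyQueryResult, Session};
use uuid::Uuid;

async fn tracing_id_of(
    session: &Session,
    ks: &str,
) -> Result<Option<Uuid>, Box<dyn std::error::Error>> {
    // Enable tracing on the statement; the returned LegacyQueryResult then
    // carries a tracing_id pointing into the system_traces tables.
    let mut query = Query::new(format!("SELECT * FROM {}.tab", ks));
    query.set_tracing(true);
    let result: LegacyQueryResult = session.query_unpaged(query, &[]).await?;
    Ok(result.tracing_id)
}
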
diff --git a/scylla/tests/integration/tablets.rs b/scylla/tests/integration/tablets.rs
index 3f1356840..2bdf96987 100644
--- a/scylla/tests/integration/tablets.rs
+++ b/scylla/tests/integration/tablets.rs
@@ -17,7 +17,7 @@
 use scylla::transport::ClusterData;
 use scylla::transport::Node;
 use scylla::transport::NodeRef;
 use scylla::ExecutionProfile;
-use scylla::QueryResult;
+use scylla::LegacyQueryResult;
 use scylla::Session;
 
 use scylla::transport::errors::QueryError;
@@ -185,7 +185,7 @@ async fn send_statement_everywhere(
     cluster: &ClusterData,
     statement: &PreparedStatement,
     values: &dyn SerializeRow,
-) -> Result<Vec<QueryResult>, QueryError> {
+) -> Result<Vec<LegacyQueryResult>, QueryError> {
     let tasks = cluster.get_nodes_info().iter().flat_map(|node| {
         let shard_count: u16 = node.sharder().unwrap().nr_shards.into();
         (0..shard_count).map(|shard| {
@@ -210,7 +210,7 @@ async fn send_unprepared_query_everywhere(
     session: &Session,
     cluster: &ClusterData,
     query: &Query,
-) -> Result<Vec<QueryResult>, QueryError> {
+) -> Result<Vec<LegacyQueryResult>, QueryError> {
     let tasks = cluster.get_nodes_info().iter().flat_map(|node| {
         let shard_count: u16 = node.sharder().unwrap().nr_shards.into();
         (0..shard_count).map(|shard| {
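
For callers of helpers like the two above, only the collected item type changes: they now gather `Vec<LegacyQueryResult>`. A simplified, hedged sketch of that fan-out shape against the public API (illustrative only: it runs one statement `n` times concurrently and skips the per-shard connection pinning the real helpers perform):

use futures::future::try_join_all;
use scylla::prepared_statement::PreparedStatement;
use scylla::transport::errors::QueryError;
use scylla::{LegacyQueryResult, Session};

// Execute one prepared statement `n` times concurrently and collect every
// LegacyQueryResult, mirroring the Vec<LegacyQueryResult> return type above.
async fn send_n_times(
    session: &Session,
    statement: &PreparedStatement,
    n: usize,
) -> Result<Vec<LegacyQueryResult>, QueryError> {
    let tasks = (0..n).map(|_| session.execute_unpaged(statement, &[]));
    try_join_all(tasks).await
}
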