core,langchain,community[patch]: allow langsmith 0.2 (#28598)
baskaryan authored Dec 10, 2024
1 parent bc4dc7f commit e6a62d8
Showing 16 changed files with 588 additions and 426 deletions.
6 changes: 3 additions & 3 deletions libs/community/langchain_community/embeddings/fastembed.py
@@ -1,6 +1,6 @@
import importlib
import importlib.metadata
from typing import Any, Dict, List, Literal, Optional
from typing import Any, Dict, List, Literal, Optional, cast

import numpy as np
from langchain_core.embeddings import Embeddings
@@ -117,7 +117,7 @@ def embed_documents(self, texts: List[str]) -> List[List[float]]:
embeddings = self.model.embed(
texts, batch_size=self.batch_size, parallel=self.parallel
)
return [e.tolist() for e in embeddings]
return [cast(List[float], e.tolist()) for e in embeddings]

def embed_query(self, text: str) -> List[float]:
"""Generate query embeddings using FastEmbed.
@@ -133,4 +133,4 @@ def embed_query(self, text: str) -> List[float]:
text, batch_size=self.batch_size, parallel=self.parallel
)
)
return query_embeddings.tolist()
return cast(List[float], query_embeddings.tolist())
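This change and the similar ones in the files below follow a single pattern: numpy's ndarray.tolist() is annotated as returning Any in the numpy stubs, so mypy cannot verify the declared List[float] / List[List[float]] return types without an explicit cast. A minimal sketch of that pattern (the helper name _to_float_list is illustrative, not part of the commit):

from typing import List, cast

import numpy as np


def _to_float_list(arr: np.ndarray) -> List[float]:
    # ndarray.tolist() is typed as returning Any, so the cast tells mypy
    # the concrete element type; it has no effect at runtime.
    return cast(List[float], arr.tolist())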
6 changes: 3 additions & 3 deletions libs/community/langchain_community/embeddings/laser.py
@@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, cast

import numpy as np
from langchain_core.embeddings import Embeddings
@@ -73,7 +73,7 @@ def embed_documents(self, texts: List[str]) -> List[List[float]]:
embeddings: np.ndarray
embeddings = self._encoder_pipeline.encode_sentences(texts)

return embeddings.tolist()
return cast(List[List[float]], embeddings.tolist())

def embed_query(self, text: str) -> List[float]:
"""Generate single query text embeddings using LASER.
@@ -86,4 +86,4 @@ def embed_query(self, text: str) -> List[float]:
"""
query_embeddings: np.ndarray
query_embeddings = self._encoder_pipeline.encode_sentences([text])
return query_embeddings.tolist()[0]
return cast(List[List[float]], query_embeddings.tolist())[0]
4 changes: 2 additions & 2 deletions libs/community/langchain_community/utilities/redis.py
@@ -2,7 +2,7 @@

import logging
import re
from typing import TYPE_CHECKING, Any, List, Optional, Pattern
from typing import TYPE_CHECKING, Any, List, Optional, Pattern, cast
from urllib.parse import urlparse

import numpy as np
@@ -18,7 +18,7 @@ def _array_to_buffer(array: List[float], dtype: Any = np.float32) -> bytes:


def _buffer_to_array(buffer: bytes, dtype: Any = np.float32) -> List[float]:
return np.frombuffer(buffer, dtype=dtype).tolist()
return cast(List[float], np.frombuffer(buffer, dtype=dtype).tolist())


class TokenEscaper:
6 changes: 3 additions & 3 deletions libs/community/langchain_community/vectorstores/semadb.py
@@ -1,4 +1,4 @@
from typing import Any, Iterable, List, Optional, Tuple
from typing import Any, Iterable, List, Optional, Tuple, cast
from uuid import uuid4

import numpy as np
@@ -111,7 +111,7 @@ def add_texts(
embed_matrix = embed_matrix / np.linalg.norm(
embed_matrix, axis=1, keepdims=True
)
embeddings = embed_matrix.tolist()
embeddings = cast(List[List[float]], embed_matrix.tolist())
# Create points
ids: List[str] = []
points = []
@@ -186,7 +186,7 @@ def _search_points(self, embedding: List[float], k: int = 4) -> List[dict]:
if self.distance_strategy == DistanceStrategy.COSINE:
vec = np.array(embedding)
vec = vec / np.linalg.norm(vec)
embedding = vec.tolist()
embedding = cast(List[float], vec.tolist())
# Perform search request
payload = {
"vector": embedding,
6 changes: 3 additions & 3 deletions libs/community/langchain_community/vectorstores/usearch.py
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast

import numpy as np
from langchain_core.documents import Document
@@ -75,7 +75,7 @@ def add_texts(
self.index.add(np.array(ids), np.array(embeddings))
self.docstore.add(dict(zip(ids, documents)))
self.ids.extend(ids)
return ids.tolist()
return cast(List[str], ids.tolist())

def similarity_search_with_score(
self,
@@ -171,4 +171,4 @@ def from_texts(
usearch = guard_import("usearch.index")
index = usearch.Index(ndim=len(embeddings[0]), metric=metric)
index.add(np.array(ids), np.array(embeddings))
return cls(embedding, index, docstore, ids.tolist())
return cls(embedding, index, docstore, cast(List[str], ids.tolist()))
185 changes: 116 additions & 69 deletions libs/community/poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion libs/community/pyproject.toml
@@ -39,7 +39,7 @@ aiohttp = "^3.8.3"
tenacity = ">=8.1.0,!=8.4.0,<10"
dataclasses-json = ">= 0.5.7, < 0.7"
pydantic-settings = "^2.4.0"
langsmith = "^0.1.125"
langsmith = ">=0.1.125,<0.3"
httpx-sse = "^0.4.0"
[[tool.poetry.dependencies.numpy]]
version = ">=1.22.4,<2"
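The dependency change above is the heart of the commit: Poetry's caret constraint ^0.1.125 means >=0.1.125,<0.2.0, which excluded the langsmith 0.2 line; the explicit range keeps the lower bound while admitting 0.2.x and still excluding 0.3. A quick way to check an installed version against the new range (a sketch using the packaging library, not part of the commit):

from importlib import metadata

from packaging.specifiers import SpecifierSet

# The widened constraint from pyproject.toml above.
langsmith_range = SpecifierSet(">=0.1.125,<0.3")

installed = metadata.version("langsmith")
print(installed, installed in langsmith_range)  # e.g. "0.2.3 True"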
@@ -3,7 +3,7 @@
import math
import os
import tempfile
from typing import List
from typing import List, cast

import numpy as np
import pytest
@@ -60,13 +60,13 @@ class RandomEmbeddings(Embeddings):
"""Fake embeddings with random vectors. For testing purposes."""

def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [np.random.rand(100).tolist() for _ in texts]
return [cast(list[float], np.random.rand(100).tolist()) for _ in texts]

def embed_query(self, text: str) -> List[float]:
return np.random.rand(100).tolist()
return cast(list[float], np.random.rand(100).tolist())

def embed_image(self, uris: List[str]) -> List[List[float]]:
return [np.random.rand(100).tolist() for _ in uris]
return [cast(list[float], np.random.rand(100).tolist()) for _ in uris]


class IncrementalEmbeddings(Embeddings):
4 changes: 3 additions & 1 deletion libs/community/tests/unit_tests/vectorstores/test_utils.py
@@ -1,5 +1,7 @@
"""Test vector store utility functions."""

from typing import cast

import numpy as np
from langchain_core.documents import Document

@@ -53,7 +55,7 @@ def test_maximal_marginal_relevance() -> None:
def test_maximal_marginal_relevance_query_dim() -> None:
query_embedding = np.random.random(size=5)
query_embedding_2d = query_embedding.reshape((1, 5))
embedding_list = np.random.random(size=(4, 5)).tolist()
embedding_list = cast(list[list[float]], np.random.random(size=(4, 5)).tolist())
first = maximal_marginal_relevance(query_embedding, embedding_list)
second = maximal_marginal_relevance(query_embedding_2d, embedding_list)
assert first == second
3 changes: 2 additions & 1 deletion libs/core/langchain_core/tracers/context.py
@@ -6,6 +6,7 @@
from typing import (
TYPE_CHECKING,
Any,
Literal,
Optional,
Union,
cast,
@@ -141,7 +142,7 @@ def _get_trace_callbacks(
return cb


def _tracing_v2_is_enabled() -> bool:
def _tracing_v2_is_enabled() -> Union[bool, Literal["local"]]:
if tracing_v2_callback_var.get() is not None:
return True
return ls_utils.tracing_is_enabled()
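The widened return type reflects that, with langsmith 0.2, ls_utils.tracing_is_enabled() can report a third state besides True/False; the Literal["local"] value appears to mean runs are traced locally rather than sent to LangSmith. A hypothetical caller handling all three states (the function name and messages are illustrative, not from the repo):

from typing import Literal, Union


def describe_tracing(enabled: Union[bool, Literal["local"]]) -> str:
    # Check the string sentinel first: "local" is truthy, so a plain
    # `if enabled:` branch would otherwise swallow it.
    if enabled == "local":
        return "tracing locally only; runs are not sent to LangSmith"
    if enabled:
        return "tracing enabled; runs are sent to LangSmith"
    return "tracing disabled"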