Skip to content

Commit

Permalink
Merge pull request #93 from ucbepic/staging
Browse files Browse the repository at this point in the history
Nits for cleaning up API.
  • Loading branch information
shreyashankar authored Oct 11, 2024
2 parents 393285b + afa57d8 commit 22d3a40
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions docetl/operations/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,7 +414,7 @@ def gen_embedding(self, model: str, input: List[str]) -> List[float]:

# TODO: optimize this
@freezeargs
def cached_call_llm(
def _cached_call_llm(
self,
cache_key: str,
model: str,
Expand All @@ -427,7 +427,7 @@ def cached_call_llm(
"""
Cached version of the call_llm function.
This function serves as a cached wrapper around call_llm_with_cache. It uses
This function serves as a cached wrapper around _call_llm_with_cache. It uses
the @freezeargs decorator to ensure immutable arguments and @functools.lru_cache
for caching results.
Expand All @@ -440,12 +440,12 @@ def cached_call_llm(
tools (Optional[str]): The tools to pass to the LLM.
scratchpad (Optional[str]): The scratchpad to use for the operation.
Returns:
str: The result from call_llm_with_cache.
str: The result from _call_llm_with_cache.
"""
with cache as c:
result = c.get(cache_key)
if result is None:
result = self.call_llm_with_cache(
result = self._call_llm_with_cache(
model, op_type, messages, output_schema, tools, scratchpad
)
                # Only set the cache if the result's tool calls or output are not empty
Expand Down Expand Up @@ -554,7 +554,7 @@ def call_llm(
rate_limited_attempt = 0
while attempt <= max_retries:
try:
return timeout(timeout_seconds)(self.cached_call_llm)(
return timeout(timeout_seconds)(self._cached_call_llm)(
key,
model,
op_type,
Expand Down Expand Up @@ -583,7 +583,7 @@ def call_llm(
return {}
attempt += 1

def call_llm_with_cache(
def _call_llm_with_cache(
self,
model: str,
op_type: str,
Expand Down Expand Up @@ -892,7 +892,7 @@ def parse_llm_response(
This function extracts the tool calls from the LLM response and returns the arguments
"""
try:
return self.parse_llm_response_helper(response, schema, tools)
return self._parse_llm_response_helper(response, schema, tools)
except InvalidOutputError as e:
if manually_fix_errors:
rprint(
Expand All @@ -909,7 +909,7 @@ def parse_llm_response(
else:
raise e

def parse_llm_response_helper(
def _parse_llm_response_helper(
self,
response: Any,
schema: Dict[str, Any] = {},
Expand Down

0 comments on commit 22d3a40

Please sign in to comment.