diff --git a/.gitleaksignore b/.gitleaksignore
index 046a0736..1c42ce39 100644
--- a/.gitleaksignore
+++ b/.gitleaksignore
@@ -19,4 +19,5 @@ a8e306437319e480ae967d24c27b7e0265a853c8:packages/valory/skills/abstract_round_a
 a8e306437319e480ae967d24c27b7e0265a853c8:packages/valory/skills/registration_abci/skill.yaml:generic-api-key:86
 a8e306437319e480ae967d24c27b7e0265a853c8:packages/valory/skills/termination_abci/skill.yaml:generic-api-key:84
 a8e306437319e480ae967d24c27b7e0265a853c8:packages/valory/skills/transaction_settlement_abci/skill.yaml:generic-api-key:92
-ed97b16cdd270e228ecd3d837da4484c09357108:packages/valory/skills/reset_pause_abci/skill.yaml:generic-api-key:80
\ No newline at end of file
+ed97b16cdd270e228ecd3d837da4484c09357108:packages/valory/skills/reset_pause_abci/skill.yaml:generic-api-key:80
+0c8c92a4a5e3b94e0f8b8aadb7c104c644f23277:packages/valory/skills/subscription_abci/skill.yaml:generic-api-key:89
\ No newline at end of file
diff --git a/packages/packages.json b/packages/packages.json
index d2db3fda..3119b9b5 100644
--- a/packages/packages.json
+++ b/packages/packages.json
@@ -2,10 +2,10 @@
     "dev": {
         "connection/valory/websocket_client/0.1.0": "bafybeiflmystocxaqblhpzqlcop2vkhsknpzjx2jomohomaxamwskeokzm",
         "skill/valory/contract_subscription/0.1.0": "bafybeicyugrkx5glat4p4ezwf6i7oduh26eycfie6ftd4uxrknztzl3ik4",
-        "agent/valory/mech/0.1.0": "bafybeifx6veisnvepeje5wtnj2ju2njszt5x5vdpn3bbljbds7tale4i2q",
+        "agent/valory/mech/0.1.0": "bafybeiezyvels4ylygglro3isx7cvjaku2e5cfhb2vjrw6b3gxjioa4h4a",
         "skill/valory/mech_abci/0.1.0": "bafybeifktlrkncnb5hjtwc7xqjag2pzmtciogp6nsm4t5umrqhmkmzyl5i",
         "contract/valory/agent_mech/0.1.0": "bafybeicgt3bwogpwv7t57xuxzcq7vxmoq4glcjoprabb6d4ajtgjwzsrzi",
-        "service/valory/mech/0.1.0": "bafybeig5pa5gnfz4bbpx6tshtqjwu2ubbdpj7s2xcjs3nxrcy7ioizw4ci",
+        "service/valory/mech/0.1.0": "bafybeicme3nvcsgvid2q4k2xuhg4y4h6hzfzws4a2ikuowsuquoehjy6re",
         "protocol/valory/acn_data_share/0.1.0": "bafybeih5ydonnvrwvy2ygfqgfabkr47s4yw3uqxztmwyfprulwfsoe7ipq",
         "skill/valory/task_submission_abci/0.1.0": "bafybeia6b7rzrbrhijkctxpwzeywqvsdqm2j5u5ee6fbmrp6t3x2afdase",
         "skill/valory/task_execution/0.1.0": "bafybeidmgp7dwelaxbul532vsebllmb6tyqbnvrifosmxwkzwgvyekyrve",
diff --git a/packages/valory/agents/mech/aea-config.yaml b/packages/valory/agents/mech/aea-config.yaml
index cb540a8a..ef297e6c 100644
--- a/packages/valory/agents/mech/aea-config.yaml
+++ b/packages/valory/agents/mech/aea-config.yaml
@@ -191,7 +191,7 @@ models:
     args:
       agent_mech_contract_addresses: ${list:["0xFf82123dFB52ab75C417195c5fDB87630145ae81"]}
      task_deadline: ${float:240.0}
-      file_hash_to_tools_json: ${list:[["bafybeibi34bhbvesmvd6o24jxvuldrwen4wj62na3lhva7k4afkg2shinu",["openai-text-davinci-002","openai-text-davinci-003","openai-gpt-3.5-turbo","openai-gpt-4"]],["bafybeiafdm3jctiz6wwo3rmo3vdubk7j7l5tumoxi5n5rc3x452mtkgyua",["stabilityai-stable-diffusion-v1-5","stabilityai-stable-diffusion-xl-beta-v2-2-2","stabilityai-stable-diffusion-512-v2-1","stabilityai-stable-diffusion-768-v2-1"]],["bafybeidpbnqbruzqlq424qt3i5dcvyqmcimshjilftabnrroujmjhdmteu",["transfer-native"]],["bafybeiglhy5epaytvt5qqdx77ld23ekouli53qrf2hjyebd5xghlunidfi",["prediction-online","prediction-offline"]]]}
+      file_hash_to_tools_json: ${list:[["bafybeiemk2xmhkvvglukyxp52fh7irgoubq7bydyx4i67wfrcv3c4t47sa",["claude-prediction-offline","claude-prediction-online"]],["bafybeibi34bhbvesmvd6o24jxvuldrwen4wj62na3lhva7k4afkg2shinu",["openai-text-davinci-002","openai-text-davinci-003","openai-gpt-3.5-turbo","openai-gpt-4"]],["bafybeiafdm3jctiz6wwo3rmo3vdubk7j7l5tumoxi5n5rc3x452mtkgyua",["stabilityai-stable-diffusion-v1-5","stabilityai-stable-diffusion-xl-beta-v2-2-2","stabilityai-stable-diffusion-512-v2-1","stabilityai-stable-diffusion-768-v2-1"]],["bafybeidpbnqbruzqlq424qt3i5dcvyqmcimshjilftabnrroujmjhdmteu",["transfer-native"]],["bafybeiglhy5epaytvt5qqdx77ld23ekouli53qrf2hjyebd5xghlunidfi",["prediction-online","prediction-offline"]]]}
       api_keys_json: ${list:[["openai", "dummy_api_key"],["stabilityai", "dummy_api_key"],["google_api_key", "dummy_api_key"],["google_engine_id", "dummy_api_key"]]}
       polling_interval: ${float:30.0}
diff --git a/packages/valory/services/mech/service.yaml b/packages/valory/services/mech/service.yaml
index 40813abc..22a5f6be 100644
--- a/packages/valory/services/mech/service.yaml
+++ b/packages/valory/services/mech/service.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 fingerprint:
   README.md: bafybeif7ia4jdlazy6745ke2k2x5yoqlwsgwr6sbztbgqtwvs3ndm2p7ba
 fingerprint_ignore_patterns: []
-agent: valory/mech:0.1.0:bafybeifx6veisnvepeje5wtnj2ju2njszt5x5vdpn3bbljbds7tale4i2q
+agent: valory/mech:0.1.0:bafybeiezyvels4ylygglro3isx7cvjaku2e5cfhb2vjrw6b3gxjioa4h4a
 number_of_agents: 4
 deployment:
   agent:
diff --git a/tools/prediction_request_claude.py b/tools/prediction_request_claude.py
index 2620d025..9ca0a631 100644
--- a/tools/prediction_request_claude.py
+++ b/tools/prediction_request_claude.py
@@ -19,18 +19,15 @@
 """This module implements a Mech tool for binary predictions."""

-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT, Stream
 import json
 from concurrent.futures import Future, ThreadPoolExecutor
-from typing import Any, Dict, Generator, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Iterator

-import openai
 import requests
-from anthropic.types import Completion
+from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
 from bs4 import BeautifulSoup
 from googleapiclient.discovery import build
-
 NUM_URLS_EXTRACT = 5
 DEFAULT_OPENAI_SETTINGS = {
     "max_tokens": 500,
@@ -84,6 +81,10 @@
 0 indicates lowest utility; 1 maximum utility.
 * The sum of "p_yes" and "p_no" must equal 1.
 * Output only the JSON object. Do not include any other contents in your response.
+* Never use Markdown syntax highlighting, such as ```json``` to surround the output. Only output the raw json string.
+* This is incorrect:"```json{{\n \"p_yes\": 0.2,\n \"p_no\": 0.8,\n \"confidence\": 0.7,\n \"info_utility\": 0.5\n}}```"
+* This is incorrect:```json"{{\n \"p_yes\": 0.2,\n \"p_no\": 0.8,\n \"confidence\": 0.7,\n \"info_utility\": 0.5\n}}"```
+* This is correct:"{{\n \"p_yes\": 0.2,\n \"p_no\": 0.8,\n \"confidence\": 0.7,\n \"info_utility\": 0.5\n}}"
 """

 URL_QUERY_PROMPT = """
@@ -111,6 +112,10 @@
 the probability that the event in "USER_PROMPT" occurs. You must provide original information in each query, and they should not overlap
 or lead to obtain the same set of results.
 * Output only the JSON object to be parsed by Python's "json.loads()". Do not include any other contents in your response.
+* Never use Markdown syntax highlighting, such as ```json```. Only output the raw json string.
+* This is incorrect: "```json{\n \"p_yes\": 0.2,\n \"p_no\": 0.8,\n \"confidence\": 0.7,\n \"info_utility\": 0.5\n}```"
+* This is incorrect: ```json"{\n \"p_yes\": 0.2,\n \"p_no\": 0.8,\n \"confidence\": 0.7,\n \"info_utility\": 0.5\n}"```
+* This is correct: "{\n \"p_yes\": 0.2,\n \"p_no\": 0.8,\n \"confidence\": 0.7,\n \"info_utility\": 0.5\n}"
 """


@@ -160,7 +165,7 @@ def extract_text(

 def process_in_batches(
     urls: List[str], window: int = 5, timeout: int = 10
-) -> Generator[None, None, List[Tuple[Future, str]]]:
+) -> Iterator[List[Tuple[Future, str]]]:
     """Iter URLs in batches."""
     with ThreadPoolExecutor() as executor:
         for i in range(0, len(urls), window):
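
Note on the import change in `tools/prediction_request_claude.py` (outside the diff itself): dropping `openai` and `anthropic.types.Completion` in favour of `Anthropic, HUMAN_PROMPT, AI_PROMPT` points to the tool driving Claude through the synchronous completions client. A minimal sketch of that call pattern follows; the model name, token limit, and function name are illustrative assumptions, not values taken from this change.

```python
import json
from typing import Any, Dict

from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT


def run_claude_prediction(prompt: str, api_key: str, max_tokens: int = 500) -> Dict[str, Any]:
    """Send a formatted PREDICTION_PROMPT to Claude and parse its raw-JSON reply (sketch, not the tool's code)."""
    client = Anthropic(api_key=api_key)
    completion = client.completions.create(
        model="claude-2",  # assumption: any completions-capable Claude model
        max_tokens_to_sample=max_tokens,
        prompt=f"{HUMAN_PROMPT} {prompt}{AI_PROMPT}",
    )
    return json.loads(completion.completion)  # the updated prompt demands a raw JSON object
```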
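The new prompt bullets exist because a reply wrapped in ```json fences breaks `json.loads()`. Independently of the prompt fix, a consumer could also tolerate fenced output defensively; the helper below is hypothetical and not part of this diff (it relies on `str.removeprefix`/`removesuffix`, so Python 3.9+).

```python
import json
from typing import Any, Dict


def parse_model_json(raw: str) -> Dict[str, Any]:
    """Parse a model reply, tolerating accidental ```json fences (hypothetical helper)."""
    text = raw.strip()
    if text.startswith("```"):
        text = text.removeprefix("```json").removeprefix("```")
        text = text.removesuffix("```").strip()
    return json.loads(text)


# Both calls yield the same dict:
parse_model_json('{"p_yes": 0.2, "p_no": 0.8, "confidence": 0.7, "info_utility": 0.5}')
parse_model_json('```json{"p_yes": 0.2, "p_no": 0.8, "confidence": 0.7, "info_utility": 0.5}```')
```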
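On the typing fix: `process_in_batches` yields lists of `(Future, url)` pairs, so `Iterator[List[Tuple[Future, str]]]` states its yield type directly, whereas the old `Generator[None, None, List[Tuple[Future, str]]]` put that type in the return slot and declared the yield type as `None`. The hunk is cut off after the `for` loop; the body below is an assumed completion that satisfies the new annotation, not necessarily the exact code in the file.

```python
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Iterator, List, Tuple

import requests


def process_in_batches(
    urls: List[str], window: int = 5, timeout: int = 10
) -> Iterator[List[Tuple[Future, str]]]:
    """Iter URLs in batches."""
    with ThreadPoolExecutor() as executor:
        for i in range(0, len(urls), window):
            batch = urls[i : i + window]
            # Assumed body: fire one GET per URL and yield the (future, url) pairs for this batch.
            yield [(executor.submit(requests.get, url, timeout=timeout), url) for url in batch]
```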