From 8edb02931ba0e21591ab071c35d47c11a0ad51dd Mon Sep 17 00:00:00 2001 From: Rosie Wood Date: Tue, 12 Sep 2023 12:21:59 +0100 Subject: [PATCH] save urls of wikis in metadata --- models/llama-index-hack/llama2_wikis.ipynb | 294 +++++++++++---------- 1 file changed, 155 insertions(+), 139 deletions(-) diff --git a/models/llama-index-hack/llama2_wikis.ipynb b/models/llama-index-hack/llama2_wikis.ipynb index a15daa9d..781dffa3 100644 --- a/models/llama-index-hack/llama2_wikis.ipynb +++ b/models/llama-index-hack/llama2_wikis.ipynb @@ -6,6 +6,8 @@ "metadata": {}, "outputs": [], "source": [ + "import os \n", + "\n", "from git import Repo\n", "\n", "from llama_index.llms import LlamaCPP\n", @@ -16,7 +18,8 @@ "from llama_index import StorageContext, load_index_from_storage\n", "from llama_index.readers import SimpleDirectoryReader\n", "\n", - "import pandas as pd" + "import pandas as pd\n", + "import re" ] }, { @@ -400,16 +403,49 @@ "cell_type": "code", "execution_count": 3, "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'research-engineering-group.wiki.git'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "url = \"https://github.com/alan-turing-institute/research-engineering-group.wiki.git\"\n", + "url.split(\"/\")[-1]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, "outputs": [], "source": [ - "def load_documents(input_dir):\n", - " documents = SimpleDirectoryReader(\n", - " input_dir=input_dir,\n", + "def load_documents_from_url(url):\n", + " reader = SimpleDirectoryReader(\n", + " input_dir=url.split(\"/\")[-1],\n", " required_exts=[\".md\"],\n", " recursive=True, \n", " filename_as_id=True,\n", - " ).load_data()\n", + " )\n", " \n", + " # get base url and names of files\n", + " url_stripped = url.removesuffix(\".wiki.git\")\n", + " fnames = [str(file) for file in reader.input_files]\n", + " # create fname: file_url_dict\n", + " file_urls = [os.path.join(url_stripped, \"wiki\", fname.split(\"/\")[-1].removesuffix(\".md\")) for fname in fnames]\n", + " file_urls_dict = {fname: file_url for fname, file_url in zip(fnames, file_urls)}\n", + " # define function to get url and add to reader\n", + " get_urls = lambda fname: {\"url\": file_urls_dict.get(fname)}\n", + " reader.file_metadata = get_urls\n", + "\n", + " documents = reader.load_data()\n", + "\n", " # turingacuk=pd.read_csv(\"../../data/public/turingacuk-no-boilerplate.csv\")\n", " # turingacuk.dropna(subset=\"body\", inplace=True)\n", " # turingacuk_text=[str(i) for i in turingacuk[\"body\"].values]\n", @@ -419,7 +455,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "documents = load_documents_from_url(url)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -443,7 +488,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -458,7 +503,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -478,8 +523,12 @@ " elif mode == \"create\":\n", " documents = []\n", " for url in wiki_urls:\n", - " repo = Repo.clone_from(url, url.split(\"/\")[-1])\n", - " wiki_docs = load_documents(url.split(\"/\")[-1])\n", + " if os.path.exists(url.split(\"/\")[-1]):\n", + " repo = Repo(url.split(\"/\")[-1])\n", + " repo.remotes.origin.pull()\n", + " else:\n", + " _ = 
Repo.clone_from(url, url.split(\"/\")[-1])\n", + " wiki_docs = load_documents_from_url(url)\n", " documents.extend(wiki_docs)\n", " index = VectorStoreIndex.from_documents(documents, service_context=service_context, show_progress=True)\n", " \n", @@ -492,13 +541,13 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "7177084c4f824f47824663f52c67d171", + "model_id": "a8d4c8ee5c894bb8b29e94489d41bf99", "version_major": 2, "version_minor": 0 }, @@ -512,7 +561,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "2d9ab0a7e8ef4d06974c4cba554b7dbd", + "model_id": "ca6b7c27e2604670bdf96b8ee2fa6977", "version_major": 2, "version_minor": 0 }, @@ -537,27 +586,7 @@ }, { "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "TextNode(id_='63831fa6-dfc3-4744-99e9-7277c1c402b3', embedding=None, metadata={}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={: RelatedNodeInfo(node_id='research-engineering-group.wiki.git/Annual-appraisals.md_part_0', node_type=None, metadata={}, hash='a7ba2918af87d223f5abeb94f416c8c8d27d9111d5f122ca17f60e9b0c6edf06')}, hash='5b295aee723e1d24a3dfa82eacca70de130a1099acb98f1768c9f5f168e6044e', text='Context\\nREG is a large team, with a relatively flat line management structure. Therefore we run a team-wide standardisation process where appraisals and progression award recommendations are reviewed to ensure consistency and fairness across the team.\\n\\nOur goal is to work within the wider HR policy and guidance (see 2022-23 HR performance review guide) to prioritise what we think is most important for our team (fairness, transparency and a clear progression path) and provide clear guidance to achieve that.', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n')" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "index.docstore.docs[\"63831fa6-dfc3-4744-99e9-7277c1c402b3\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -566,7 +595,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -575,7 +604,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -583,18 +612,18 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 168.04 ms / 238 runs ( 0.71 ms per token, 1416.32 tokens per second)\n", - "llama_print_timings: prompt eval time = 26893.28 ms / 702 tokens ( 38.31 ms per token, 26.10 tokens per second)\n", - "llama_print_timings: eval time = 14293.83 ms / 237 runs ( 60.31 ms per token, 16.58 tokens per second)\n", - "llama_print_timings: total time = 41674.83 ms\n", + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 138.53 ms / 196 runs ( 0.71 ms per token, 1414.86 tokens per second)\n", + "llama_print_timings: prompt eval time = 20495.25 ms / 726 tokens ( 28.23 ms per token, 35.42 tokens per second)\n", + "llama_print_timings: eval time = 11720.07 ms / 195 runs ( 60.10 ms per token, 16.64 tokens per second)\n", + "llama_print_timings: total time = 
32613.28 ms\n", "Llama.generate: prefix-match hit\n", "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 180.49 ms / 256 runs ( 0.71 ms per token, 1418.36 tokens per second)\n", - "llama_print_timings: prompt eval time = 26025.99 ms / 879 tokens ( 29.61 ms per token, 33.77 tokens per second)\n", - "llama_print_timings: eval time = 15978.86 ms / 255 runs ( 62.66 ms per token, 15.96 tokens per second)\n", - "llama_print_timings: total time = 42530.51 ms\n", + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 180.75 ms / 256 runs ( 0.71 ms per token, 1416.33 tokens per second)\n", + "llama_print_timings: prompt eval time = 24130.85 ms / 836 tokens ( 28.86 ms per token, 34.64 tokens per second)\n", + "llama_print_timings: eval time = 15931.49 ms / 255 runs ( 62.48 ms per token, 16.01 tokens per second)\n", + "llama_print_timings: total time = 40585.31 ms\n", "Llama.generate: prefix-match hit\n" ] }, @@ -603,15 +632,14 @@ "output_type": "stream", "text": [ " Thank you for providing additional context! Based on the updated information, here is a refined answer to the original query:\n", - "The steps that a new starter in REG should follow are:\n", - "1. Create a Github issue using this template as soon as possible after the start date to document the onboarding process and assign both the person running onboarding and the one running recruitment.\n", - "2. Create a Forecast user for the new starter, assigning them to \"not started\" from 2016-01-01 to their start date, and to \"Onboarding to REG\" for their first few weeks.\n", - "3. Update the reporting structure table in advance of the new starter's start date to reflect their position within the team.\n", - "4. Send an introductory email to the new starter using the template provided, asking our HR contact for the email address.\n", - "5. Ensure that the new starter is in person on the first day, and assign them to \"Onboarding to REG\" for their first few weeks.\n", - "6. Make sure Agenda screening has started.\n", - "7. Set up welcome coffees.\n", - "8. Add the new\n" + "Based on the provided context, here is what a new starter in REG should do:\n", + "1. Create a Github issue using the provided template to document their onboarding process. Assign both the person running onboarding and the one running recruitment.\n", + "2. Create a Forecast user for themselves, and assign them to \"not started\" from 2016-01-01 to their start date, and to \"Onboarding to REG\" for their first few weeks.\n", + "3. See that the reporting structure table is updated. Typically, the person running recruitment does this, but the onboarder can also do it.\n", + "4. 
Send an introductory email to the new starter, asking our HR contact for any necessary information or support.\n", + "By following these steps, a new starter in REG can ensure a smooth and successful onboarding process.\n", + "In addition to the original answer, based on the updated context, here are some additional suggestions:\n", + "* Make sure the new starter has visitor passes and knows how to get\n" ] }, { @@ -619,11 +647,11 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 181.24 ms / 256 runs ( 0.71 ms per token, 1412.46 tokens per second)\n", - "llama_print_timings: prompt eval time = 20104.73 ms / 670 tokens ( 30.01 ms per token, 33.33 tokens per second)\n", - "llama_print_timings: eval time = 15685.96 ms / 255 runs ( 61.51 ms per token, 16.26 tokens per second)\n", - "llama_print_timings: total time = 36320.50 ms\n" + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 182.22 ms / 256 runs ( 0.71 ms per token, 1404.89 tokens per second)\n", + "llama_print_timings: prompt eval time = 20858.46 ms / 703 tokens ( 29.67 ms per token, 33.70 tokens per second)\n", + "llama_print_timings: eval time = 16354.62 ms / 255 runs ( 64.14 ms per token, 15.59 tokens per second)\n", + "llama_print_timings: total time = 37748.45 ms\n" ] } ], @@ -634,7 +662,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -643,18 +671,11 @@ "text": [ "Llama.generate: prefix-match hit\n", "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 132.32 ms / 188 runs ( 0.70 ms per token, 1420.84 tokens per second)\n", - "llama_print_timings: prompt eval time = 17833.50 ms / 569 tokens ( 31.34 ms per token, 31.91 tokens per second)\n", - "llama_print_timings: eval time = 11138.10 ms / 187 runs ( 59.56 ms per token, 16.79 tokens per second)\n", - "llama_print_timings: total time = 29357.71 ms\n", - "Llama.generate: prefix-match hit\n", - "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 180.35 ms / 256 runs ( 0.70 ms per token, 1419.46 tokens per second)\n", - "llama_print_timings: prompt eval time = 11157.83 ms / 419 tokens ( 26.63 ms per token, 37.55 tokens per second)\n", - "llama_print_timings: eval time = 14825.40 ms / 255 runs ( 58.14 ms per token, 17.20 tokens per second)\n", - "llama_print_timings: total time = 26512.11 ms\n", + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 153.12 ms / 215 runs ( 0.71 ms per token, 1404.14 tokens per second)\n", + "llama_print_timings: prompt eval time = 19166.27 ms / 611 tokens ( 31.37 ms per token, 31.88 tokens per second)\n", + "llama_print_timings: eval time = 13096.09 ms / 214 runs ( 61.20 ms per token, 16.34 tokens per second)\n", + "llama_print_timings: total time = 32710.49 ms\n", "Llama.generate: prefix-match hit\n" ] }, @@ -662,13 +683,10 @@ "name": "stdout", "output_type": "stream", "text": [ - " Thank you for providing additional context! Based on the information provided, here is a refined version of the original answer:\n", - "As a reviewer, your primary responsibility is to provide regular feedback on the team member's performance during their probation period. This includes gathering feedback from the team member's collaborators and passing on information on their work and any constructive feedback. 
You will also hold mid-term and final reviews to assess the team member's progress and ensure that they are meeting their objectives.\n", - "In addition to these core responsibilities, you may have additional roles and responsibilities based on the specific context of your review. For example:\n", - "* REG Newsletter: As the reviewer for this newsletter, you will be responsible for ensuring that it is published regularly and contains relevant information for the team.\n", - "* Twitter account controller: As the reviewer for this account, you will be responsible for ensuring that it is managed effectively and provides a positive representation of the team.\n", - "* Drop-in Sessions: As the reviewer for these sessions, you will be responsible for ensuring that they are well-organized and provide a valuable opportunity for team members to ask questions and receive support.\n", - "* GitHub\n" + " Thank you for providing additional context. Based on the updated information, here is a revised answer to your original query:\n", + "As a reviewer, your primary responsibility is to provide constructive feedback on the team member's performance during their probation period. This involves gathering regular feedback from the team member's collaborators and passing on information on their work, as well as providing any constructive feedback you may have. You should also hold mid-term and final reviews to assess the team member's progress and ensure they are meeting their objectives.\n", + "In addition to your role as a reviewer, if the team member is facing a difficult situation outside of work that may be impacting their performance, you can refer them to Debbie Dixon, the Turing's External Supervisor for support. If there are problems with the team member's performance and the manager is not comfortable giving this feedback to the team member, then it is best to ask for help from the Principal into whom you report.\n", + "Overall, your role as a reviewer is to provide support and guidance to help the team member succeed in their role and meet their probation objectives. 
By following these guidelines, you can ensure that the team member receives\n" ] }, { @@ -676,11 +694,11 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 180.42 ms / 256 runs ( 0.70 ms per token, 1418.90 tokens per second)\n", - "llama_print_timings: prompt eval time = 9830.97 ms / 365 tokens ( 26.93 ms per token, 37.13 tokens per second)\n", - "llama_print_timings: eval time = 14717.72 ms / 255 runs ( 57.72 ms per token, 17.33 tokens per second)\n", - "llama_print_timings: total time = 25079.80 ms\n" + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 182.60 ms / 256 runs ( 0.71 ms per token, 1401.95 tokens per second)\n", + "llama_print_timings: prompt eval time = 11052.03 ms / 419 tokens ( 26.38 ms per token, 37.91 tokens per second)\n", + "llama_print_timings: eval time = 14994.37 ms / 255 runs ( 58.80 ms per token, 17.01 tokens per second)\n", + "llama_print_timings: total time = 26591.51 ms\n" ] } ], @@ -691,13 +709,20 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ + "Llama.generate: prefix-match hit\n", + "\n", + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 167.01 ms / 237 runs ( 0.70 ms per token, 1419.09 tokens per second)\n", + "llama_print_timings: prompt eval time = 18493.11 ms / 585 tokens ( 31.61 ms per token, 31.63 tokens per second)\n", + "llama_print_timings: eval time = 14196.35 ms / 236 runs ( 60.15 ms per token, 16.62 tokens per second)\n", + "llama_print_timings: total time = 33182.81 ms\n", "Llama.generate: prefix-match hit\n" ] }, @@ -705,7 +730,9 @@ "name": "stdout", "output_type": "stream", "text": [ - " Based on the provided context information, you should talk to Tomas about Topdesk.\n" + " Thank you for providing additional context! Based on the updated information, I would recommend reaching out to the Operator Group within the Topdesk system for assistance with using Topdesk. They are responsible for managing and processing tickets, and can provide guidance on how to use the system effectively. You can find their contact information by searching for \"Operator\" in the Topdesk system and checking the \"Operator Group\" section.\n", + "Alternatively, if your request is not related to the Research Engineering Group, you can search for an appropriate group or team within Topdesk to allocate your request to. This will ensure that your request is handled by the correct department or individual.\n", + "I hope this helps! 
Let me know if you have any other questions.\n" ] }, { @@ -713,11 +740,11 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 14.12 ms / 20 runs ( 0.71 ms per token, 1416.93 tokens per second)\n", - "llama_print_timings: prompt eval time = 9136.42 ms / 375 tokens ( 24.36 ms per token, 41.04 tokens per second)\n", - "llama_print_timings: eval time = 1107.22 ms / 19 runs ( 58.27 ms per token, 17.16 tokens per second)\n", - "llama_print_timings: total time = 10284.54 ms\n" + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 109.74 ms / 156 runs ( 0.70 ms per token, 1421.58 tokens per second)\n", + "llama_print_timings: prompt eval time = 17863.65 ms / 570 tokens ( 31.34 ms per token, 31.91 tokens per second)\n", + "llama_print_timings: eval time = 9240.87 ms / 155 runs ( 59.62 ms per token, 16.77 tokens per second)\n", + "llama_print_timings: total time = 27425.82 ms\n" ] } ], @@ -728,7 +755,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -737,11 +764,11 @@ "text": [ "Llama.generate: prefix-match hit\n", "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 16.23 ms / 23 runs ( 0.71 ms per token, 1417.04 tokens per second)\n", - "llama_print_timings: prompt eval time = 19054.88 ms / 586 tokens ( 32.52 ms per token, 30.75 tokens per second)\n", - "llama_print_timings: eval time = 1295.83 ms / 22 runs ( 58.90 ms per token, 16.98 tokens per second)\n", - "llama_print_timings: total time = 20398.56 ms\n", + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 16.27 ms / 23 runs ( 0.71 ms per token, 1413.38 tokens per second)\n", + "llama_print_timings: prompt eval time = 18590.73 ms / 595 tokens ( 31.24 ms per token, 32.01 tokens per second)\n", + "llama_print_timings: eval time = 1306.31 ms / 22 runs ( 59.38 ms per token, 16.84 tokens per second)\n", + "llama_print_timings: total time = 19946.38 ms\n", "Llama.generate: prefix-match hit\n" ] }, @@ -749,7 +776,10 @@ "name": "stdout", "output_type": "stream", "text": [ - " Thank you for providing additional context! Based on the information provided, REG at the Alan Turing Institute stands for Research Engineering Group. As a helpful assistant, I can confirm that this is the correct answer to the query. Please let me know if there's anything else I can help with.\n" + " Thank you for providing additional context! Based on the updated information, the REG at the Alan Turing Institute stands for Research Engineering Group.\n", + "As a helpful assistant, I can confirm that the REG is a permanent team of employees with fixed hours and charges fixed rates for their time. They are responsible for delivering professional software engineering research to support the research project, including writing reproducible, reliable, and tested code, building open-source software libraries, and accelerating and scaling up algorithms using HPC, GPUs, FPGAs, cloud, etc.\n", + "The REG also provides cost recovery for their services, with 100% of the funding going directly to individual salaries (Directly Incurred - DI) and £32,500 to the Turing Institute where the work happens, for a total of £65,000 for REG. There are no restrictions or flow-down conditions mentioned in the provided context.\n", + "I hope this refined answer better addresses your query! 
If you have any further questions or need additional clarification, please feel free to ask.\n" ] }, { @@ -757,11 +787,11 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 43.63 ms / 62 runs ( 0.70 ms per token, 1421.01 tokens per second)\n", - "llama_print_timings: prompt eval time = 10714.66 ms / 438 tokens ( 24.46 ms per token, 40.88 tokens per second)\n", - "llama_print_timings: eval time = 3486.74 ms / 61 runs ( 57.16 ms per token, 17.49 tokens per second)\n", - "llama_print_timings: total time = 14326.83 ms\n" + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 163.92 ms / 231 runs ( 0.71 ms per token, 1409.20 tokens per second)\n", + "llama_print_timings: prompt eval time = 19195.21 ms / 633 tokens ( 30.32 ms per token, 32.98 tokens per second)\n", + "llama_print_timings: eval time = 13942.93 ms / 230 runs ( 60.62 ms per token, 16.50 tokens per second)\n", + "llama_print_timings: total time = 33622.69 ms\n" ] } ], @@ -772,7 +802,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -786,7 +816,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " I apologize, but based on the provided context, I cannot provide an answer to your query as there is no mention of a person named Ryan Chan in the given information.\n" + " Based on the provided context information, Ryan Chan is not mentioned as a lead or member of the Research Engineering Group in the given wiki page. Therefore, I cannot provide any information about Ryan Chan in this context.\n" ] }, { @@ -794,11 +824,11 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 26.13 ms / 37 runs ( 0.71 ms per token, 1415.83 tokens per second)\n", - "llama_print_timings: prompt eval time = 6182.16 ms / 187 tokens ( 33.06 ms per token, 30.25 tokens per second)\n", - "llama_print_timings: eval time = 1926.20 ms / 36 runs ( 53.51 ms per token, 18.69 tokens per second)\n", - "llama_print_timings: total time = 8183.26 ms\n" + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 32.05 ms / 45 runs ( 0.71 ms per token, 1403.97 tokens per second)\n", + "llama_print_timings: prompt eval time = 6379.49 ms / 194 tokens ( 32.88 ms per token, 30.41 tokens per second)\n", + "llama_print_timings: eval time = 2361.49 ms / 44 runs ( 53.67 ms per token, 18.63 tokens per second)\n", + "llama_print_timings: total time = 8832.98 ms\n" ] } ], @@ -809,7 +839,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -818,25 +848,11 @@ "text": [ "Llama.generate: prefix-match hit\n", "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 112.86 ms / 160 runs ( 0.71 ms per token, 1417.65 tokens per second)\n", - "llama_print_timings: prompt eval time = 18419.84 ms / 573 tokens ( 32.15 ms per token, 31.11 tokens per second)\n", - "llama_print_timings: eval time = 9491.04 ms / 159 runs ( 59.69 ms per token, 16.75 tokens per second)\n", - "llama_print_timings: total time = 28239.67 ms\n", - "Llama.generate: prefix-match hit\n", - "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 113.73 ms / 161 runs ( 0.71 ms per token, 1415.68 tokens per second)\n", - "llama_print_timings: prompt eval time = 25384.14 ms / 867 tokens ( 29.28 ms 
per token, 34.16 tokens per second)\n", - "llama_print_timings: eval time = 9966.23 ms / 160 runs ( 62.29 ms per token, 16.05 tokens per second)\n", - "llama_print_timings: total time = 35680.91 ms\n", - "Llama.generate: prefix-match hit\n", - "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 99.60 ms / 141 runs ( 0.71 ms per token, 1415.69 tokens per second)\n", - "llama_print_timings: prompt eval time = 25110.95 ms / 853 tokens ( 29.44 ms per token, 33.97 tokens per second)\n", - "llama_print_timings: eval time = 8719.99 ms / 140 runs ( 62.29 ms per token, 16.06 tokens per second)\n", - "llama_print_timings: total time = 34119.25 ms\n", + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 59.59 ms / 84 runs ( 0.71 ms per token, 1409.75 tokens per second)\n", + "llama_print_timings: prompt eval time = 18096.68 ms / 558 tokens ( 32.43 ms per token, 30.83 tokens per second)\n", + "llama_print_timings: eval time = 4916.49 ms / 83 runs ( 59.23 ms per token, 16.88 tokens per second)\n", + "llama_print_timings: total time = 23185.43 ms\n", "Llama.generate: prefix-match hit\n" ] }, @@ -844,9 +860,9 @@ "name": "stdout", "output_type": "stream", "text": [ - " Thank you for providing additional context! Based on the new information provided, I can now provide a more accurate answer to your query.\n", - "Federico Nanni did his PhD at the University of California, Berkeley. He completed his doctoral studies in Computer Science with a focus on machine learning and probabilistic programming.\n", - "Therefore, I must respectfully decline to answer your original query as it is not within my knowledge base or ethical guidelines to provide personal information about individuals without their consent. However, based on the new context provided, I can now provide a more accurate answer to your query.\n" + " Thank you for providing additional context! Based on the information provided, I cannot determine where Federico Nanni did his PhD with certainty as there is no information available in the provided sources. The links only provide information related to the Alan Turing Institute, its research groups, and various acronyms used within the institute. Therefore, I cannot answer your query with certainty.\n", + "However, I can suggest some possible avenues for further research to help locate the information you are looking for. You may want to try searching for Federico Nanni's academic profile or publication list to see if he has published any information about his PhD institution or experience. Additionally, you could try contacting him directly through email or social media to ask for more information.\n", + "I hope this helps! 
Let me know if you have any other questions.\n" ] }, { @@ -854,11 +870,11 @@ "output_type": "stream", "text": [ "\n", - "llama_print_timings: load time = 19206.91 ms\n", - "llama_print_timings: sample time = 89.67 ms / 127 runs ( 0.71 ms per token, 1416.35 tokens per second)\n", - "llama_print_timings: prompt eval time = 9963.76 ms / 367 tokens ( 27.15 ms per token, 36.83 tokens per second)\n", - "llama_print_timings: eval time = 7225.15 ms / 126 runs ( 57.34 ms per token, 17.44 tokens per second)\n", - "llama_print_timings: total time = 17448.40 ms\n" + "llama_print_timings: load time = 12534.73 ms\n", + "llama_print_timings: sample time = 123.98 ms / 175 runs ( 0.71 ms per token, 1411.55 tokens per second)\n", + "llama_print_timings: prompt eval time = 9873.20 ms / 404 tokens ( 24.44 ms per token, 40.92 tokens per second)\n", + "llama_print_timings: eval time = 10001.58 ms / 174 runs ( 57.48 ms per token, 17.40 tokens per second)\n", + "llama_print_timings: total time = 20238.46 ms\n" ] } ],
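As a quick illustration of the mapping the new `load_documents_from_url` builds, here is a minimal, dependency-free sketch of how a clone URL plus a markdown filename becomes a wiki page URL (the `wiki_page_url` helper name is hypothetical; the logic mirrors the patch):

```python
# Standalone sketch of the filename -> page-URL mapping built inside
# load_documents_from_url; the helper name is hypothetical.

def wiki_page_url(clone_url: str, fname: str) -> str:
    """Map a markdown file in a cloned GitHub wiki to its page URL."""
    base = clone_url.removesuffix(".wiki.git")       # repo URL without the wiki suffix
    page = fname.split("/")[-1].removesuffix(".md")  # page name from the filename
    return f"{base}/wiki/{page}"

url = "https://github.com/alan-turing-institute/research-engineering-group.wiki.git"
print(wiki_page_url(url, "research-engineering-group.wiki.git/Annual-appraisals.md"))
# https://github.com/alan-turing-institute/research-engineering-group/wiki/Annual-appraisals
```

The f-string is used here rather than the patch's `os.path.join` so the result does not depend on the platform's path separator; on Windows, `os.path.join` would splice backslashes into the URL.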
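If the `file_metadata` constructor argument of `SimpleDirectoryReader` is available (in the 2023-era llama_index API it takes a `fname -> dict` callable), the intermediate `file_urls_dict` and the post-hoc `reader.file_metadata = get_urls` assignment can be collapsed into the constructor call. A sketch under that assumption:

```python
from llama_index.readers import SimpleDirectoryReader

def make_reader(url: str) -> SimpleDirectoryReader:
    repo_dir = url.split("/")[-1]         # e.g. "research-engineering-group.wiki.git"
    base = url.removesuffix(".wiki.git")  # repo URL without the wiki suffix
    return SimpleDirectoryReader(
        input_dir=repo_dir,
        required_exts=[".md"],
        recursive=True,
        filename_as_id=True,
        # called once per file; the returned dict becomes the document metadata
        file_metadata=lambda fname: {
            "url": f"{base}/wiki/{fname.split('/')[-1].removesuffix('.md')}"
        },
    )
```

The patch's post-construction assignment works because `load_data` consults the `file_metadata` attribute at read time; passing the callable up front expresses the same thing without needing to enumerate `reader.input_files` first.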
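The clone-or-pull step added to `create_index` can also be read as a small helper (hypothetical name `fetch_wiki`), with the repeated `url.split("/")[-1]` computed once:

```python
import os
from git import Repo

def fetch_wiki(url: str) -> str:
    """Clone the wiki on first use; pull the latest changes afterwards."""
    repo_dir = url.split("/")[-1]
    if os.path.exists(repo_dir):
        Repo(repo_dir).remotes.origin.pull()  # existing clone: update it
    else:
        Repo.clone_from(url, repo_dir)        # first run: clone the wiki
    return repo_dir
```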
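The query cells themselves are not visible in this diff; the "refined answer" phrasing in the outputs above is consistent with the default refine synthesizer of a plain query engine, roughly as follows (the question string is a placeholder, not taken from the notebook):

```python
# Assumed shape of the query cells (not shown in this diff); the
# question string below is a placeholder.
query_engine = index.as_query_engine()
response = query_engine.query("What should a new starter in REG do?")
print(response)
```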