Mirror of https://github.com/aljazceru/Auto-GPT.git, synced 2026-02-01 04:14:24 +01:00
Update frontend build (#5393)
Co-authored-by: GitHub Action <action@github.com>
This commit is contained in:
@@ -1 +1 @@
@@ -1 +1 @@
-{"assets/coding_tree_structure.json":["assets/coding_tree_structure.json"],"assets/data_tree_structure.json":["assets/data_tree_structure.json"],"assets/general_tree_structure.json":["assets/general_tree_structure.json"],"assets/images/autogpt_logo.png":["assets/images/autogpt_logo.png"],"assets/images/discord_logo.png":["assets/images/discord_logo.png"],"assets/images/github_logo.svg.png":["assets/images/github_logo.svg.png"],"assets/images/google_logo.svg.png":["assets/images/google_logo.svg.png"],"assets/images/twitter_logo.png":["assets/images/twitter_logo.png"],"assets/scrape_synthesize_tree_structure.json":["assets/scrape_synthesize_tree_structure.json"],"assets/tree_structure.json":["assets/tree_structure.json"],"packages/cupertino_icons/assets/CupertinoIcons.ttf":["packages/cupertino_icons/assets/CupertinoIcons.ttf"]}
+{"assets/coding_tree_structure.json":["assets/coding_tree_structure.json"],"assets/data_tree_structure.json":["assets/data_tree_structure.json"],"assets/general_tree_structure.json":["assets/general_tree_structure.json"],"assets/images/autogpt_logo.png":["assets/images/autogpt_logo.png"],"assets/images/discord_logo.png":["assets/images/discord_logo.png"],"assets/images/github_logo.svg.png":["assets/images/github_logo.svg.png"],"assets/images/google_logo.svg.png":["assets/images/google_logo.svg.png"],"assets/images/twitter_logo.png":["assets/images/twitter_logo.png"],"assets/scrape_synthesize_tree_structure.json":["assets/scrape_synthesize_tree_structure.json"],"assets/tree_structure.json":["assets/tree_structure.json"],"packages/cupertino_icons/assets/CupertinoIcons.ttf":["packages/cupertino_icons/assets/CupertinoIcons.ttf"],"packages/fluttertoast/assets/toastify.css":["packages/fluttertoast/assets/toastify.css"],"packages/fluttertoast/assets/toastify.js":["packages/fluttertoast/assets/toastify.js"]}
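For context: AssetManifest.json maps each logical asset key to its list of variant paths, and the change above registers fluttertoast's toastify.css and toastify.js with the bundle. A minimal sketch of resolving a key against this manifest on the web side (resolveAsset is a hypothetical helper, not part of this commit or of Flutter's API):

// Hypothetical helper: look up an asset key in the manifest shown above.
async function resolveAsset(key) {
  const res = await fetch('assets/AssetManifest.json');
  const manifest = await res.json();           // { key: [variantPath, ...] }
  const variants = manifest[key];
  if (!variants || variants.length === 0) throw new Error('Unknown asset: ' + key);
  return 'assets/' + variants[0];              // first (default) variant
}

// resolveAsset('packages/fluttertoast/assets/toastify.js')
//   -> "assets/packages/fluttertoast/assets/toastify.js"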
@@ -6724,6 +6724,31 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+--------------------------------------------------------------------------------
+fluttertoast
+
+MIT License
+
+Copyright (c) 2020 Karthik Ponnam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
--------------------------------------------------------------------------------
freetype2
@@ -63,7 +63,7 @@
"dependencies": [
"TestWriteFile"
],
-"eval_id": "261ccfaa-02a2-4c1a-8a56-c76c66f7dba1",
+"eval_id": "f219f3d3-a41b-45a9-a3d0-389832086ee8",
"ground": {
"answer": "The content of output.txt should be 'Hello World!'",
"eval": {
@@ -101,7 +101,7 @@
],
"cutoff": 60,
"dependencies": [],
-"eval_id": "81b64bf9-2b6a-4ac8-bcd2-8bfe36244ac0",
+"eval_id": "021c695a-6cc4-46c2-b93a-f3a9b0f4d123",
"ground": {
"answer": "The word 'Washington', printed to a .txt file named anything",
"eval": {
@@ -133,13 +133,14 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 150,
"dependencies": [
"TestUrlShortener"
],
-"eval_id": "54c3d7e9-71d6-476b-b045-cf0aaf118f95",
+"eval_id": "504b1648-e14a-4982-8b27-074598eb4fd0",
"ground": {
"answer": "The correct python file for a TicTacToe game is written",
"eval": {
@@ -173,7 +174,7 @@
"dependencies": [
"TestFileOrganizer"
],
-"eval_id": "41ca1035-ceca-4e0c-91ab-66ed0b350273",
+"eval_id": "8106fd7f-83fd-496e-9513-280f4a3f012c",
"ground": {
"answer": "The correct python file for a basic url shortener CLI",
"eval": {
@@ -201,13 +202,14 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 90,
"dependencies": [
"TestPasswordGenerator"
],
-"eval_id": "6ace62be-6c18-431a-947f-72fb20984b58",
+"eval_id": "029c1e6f-2b36-451e-bca6-60063b827d2e",
"ground": {
"answer": "The correct python file is written and organizes the files accordingly",
"eval": {
@@ -241,7 +243,7 @@
"dependencies": [
"TestThreeSum"
],
-"eval_id": "0823b577-64f2-477b-856d-16726fe464b0",
+"eval_id": "ac75c471-e0ce-400c-ba9a-fb72aaab444f",
"ground": {
"answer": "password_generator.py is created and satisfies the requirements.",
"eval": {
@@ -269,13 +271,14 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestWriteFile"
],
-"eval_id": "29a10990-2584-4602-8b9d-c217f6edbc4f",
+"eval_id": "a1ff38a4-1032-4bf2-960a-3b927f9936f4",
"ground": {
"answer": "The three_sum function coded properly.",
"eval": {
@@ -307,14 +310,15 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 90,
"dependencies": [
"TestTicTacToe",
"TestReadFile"
],
-"eval_id": "5a32418d-1c3a-4af1-8dc4-8d4c29bed21a",
+"eval_id": "4d613d05-475f-4f72-bf12-f6d3714340c1",
"ground": {
"answer": "The implementation of battleship that passes all the tests.",
"eval": {
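Each challenge node in these tree files carries a ground spec: the files to check and the strings they must contain (should_contain) or must not (should_not_contain), with eval.type "file" meaning a plain content check. agbenchmark's real evaluator is Python; the standalone sketch below only mirrors the JSON shape shown above:

// Illustrative only: apply a ground spec of the shape used in these files.
// readFile is assumed to map a workspace filename to its text content.
function checkGround(ground, readFile) {
  const mustHave = ground.should_contain || [];
  const mustNotHave = ground.should_not_contain || [];
  return ground.files.every((name) => {
    const text = readFile(name);
    return mustHave.every((s) => text.includes(s)) &&
           mustNotHave.every((s) => !text.includes(s));
  });
}

// e.g. checkGround({files: ['output.txt'], should_contain: ['1861']},
//                  () => 'Total utilities: 1861')  ->  true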
@@ -6,11 +6,29 @@
"id": "agbenchmark/generate_test.py::TestWriteFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]"
},
{
"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
+},
+{
+"arrows": "to",
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
-"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
-"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
+"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
},
+{
+"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
+},
+{
+"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
+},
{
"arrows": "to",
@@ -23,6 +41,12 @@
"from": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
},
+{
+"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
+}
],
"nodes": [
@@ -39,7 +63,7 @@
"dependencies": [
"TestWriteFile"
],
-"eval_id": "261ccfaa-02a2-4c1a-8a56-c76c66f7dba1",
+"eval_id": "f219f3d3-a41b-45a9-a3d0-389832086ee8",
"ground": {
"answer": "The content of output.txt should be 'Hello World!'",
"eval": {
@@ -77,7 +101,7 @@
],
"cutoff": 60,
"dependencies": [],
-"eval_id": "81b64bf9-2b6a-4ac8-bcd2-8bfe36244ac0",
+"eval_id": "021c695a-6cc4-46c2-b93a-f3a9b0f4d123",
"ground": {
"answer": "The word 'Washington', printed to a .txt file named anything",
"eval": {
@@ -111,48 +135,126 @@
"category": [
"data"
],
-"cutoff": 60,
+"cutoff": 90,
"dependencies": [
-"TestReadFile"
+"TestAnswerQuestionSmallCsv"
],
-"eval_id": "db4654d7-fc97-4290-ab27-a710c2b5ce15",
+"eval_id": "bb6e0a4b-7faf-4aa6-a524-548cddbc2732",
"ground": {
-"answer": "The csv sorted by date",
+"answer": "The correct amount spent on utilities.",
"eval": {
"type": "file"
},
"files": [
-"output.csv"
+"output.txt"
],
"should_contain": [
-"id,name,timestamp\n1,Bob,2023-09-24 12:05:00\n2,Charlie,2023-09-24 12:10:00\n3,Alice,2023-09-25 14:10:00\n4,David,2023-09-26 16:20:00"
+"1861"
]
},
"info": {
-"description": "Tests if the agent can sort a csv",
-"difficulty": "basic",
+"description": "Tests if the agent can answer a question from a csv",
+"difficulty": "intermediate",
"side_effects": [
""
]
},
-"name": "TestSortCsv",
-"task": "Sort the input.csv by the 'timestamp' column and write the new csv in the output.csv file. The order of the columns should be preserved."
+"name": "TestAnswerQuestionCsv",
+"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
-"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
-"label": "SortCsv",
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
+"label": "AnswerQuestionCsv",
"shape": "dot"
},
+{
+"color": "grey",
+"data": {
+"category": [
-"data"
+"data",
+"general"
+],
+"cutoff": 60,
+"dependencies": [
+"TestReadFile"
+],
+"eval_id": "9df3f07a-5047-488f-b788-1e1f57eba970",
+"ground": {
+"answer": "The correct amount spent on utilities.",
+"eval": {
+"type": "file"
+},
+"files": [
+"output.txt"
+],
+"should_contain": [
+"84"
+]
+},
+"info": {
+"description": "Tests if the agent can answer a question from a small csv",
+"difficulty": "intermediate",
+"side_effects": [
+""
+]
+},
+"name": "TestAnswerQuestionSmallCsv",
+"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
+},
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
+"label": "AnswerQuestionSmallCsv",
+"shape": "dot"
+},
+{
+"color": "grey",
+"data": {
+"category": [
+"data",
+"general"
+],
+"cutoff": 120,
+"dependencies": [
+"TestAnswerQuestionCsv",
+"TestCombineCsv"
+],
+"eval_id": "b1bb61cd-3d09-4a69-bb2a-9dbb3c477589",
+"ground": {
+"answer": "The correct amount spent on utilities.",
+"eval": {
+"type": "file"
+},
+"files": [
+"output.txt"
+],
+"should_contain": [
+"1861"
+]
+},
+"info": {
+"description": "Tests if the agent can answer a question from a csv",
+"difficulty": "intermediate",
+"side_effects": [
+""
+]
+},
+"name": "TestAnswerQuestionCombineCsv",
+"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
+},
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
+"label": "AnswerQuestionCombineCsv",
+"shape": "dot"
+},
{
"color": "grey",
"data": {
"category": [
+"data",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestLabelCsv"
],
-"eval_id": "d5f04342-983f-45a4-b84a-fe8d96863375",
+"eval_id": "52467beb-b951-4356-9776-9a0ae46bb33b",
"ground": {
"answer": "The csv data is combined",
"eval": {
@@ -189,7 +291,7 @@
"dependencies": [
"TestSortCsv"
],
-"eval_id": "6c58e229-aa22-4c4f-a053-4a78931ad41e",
+"eval_id": "6e2bf1f0-6842-4704-8ed1-b17c2065bbac",
"ground": {
"answer": "The csv labelled",
"eval": {
@@ -215,6 +317,44 @@
"id": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"label": "LabelCsv",
"shape": "dot"
},
+{
+"color": "grey",
+"data": {
+"category": [
+"data",
+"general"
+],
+"cutoff": 60,
+"dependencies": [
+"TestReadFile"
+],
+"eval_id": "d59ec964-6f67-4b3d-a4de-c4436fc76f95",
+"ground": {
+"answer": "The csv sorted by date",
+"eval": {
+"type": "file"
+},
+"files": [
+"output.csv"
+],
+"should_contain": [
+"id,name,timestamp\n1,Bob,2023-09-24 12:05:00\n2,Charlie,2023-09-24 12:10:00\n3,Alice,2023-09-25 14:10:00\n4,David,2023-09-26 16:20:00"
+]
+},
+"info": {
+"description": "Tests if the agent can sort a csv",
+"difficulty": "basic",
+"side_effects": [
+""
+]
+},
+"name": "TestSortCsv",
+"task": "Sort the input.csv by the 'timestamp' column and write the new csv in the output.csv file. The order of the columns should be preserved."
+},
+"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
+"label": "SortCsv",
+"shape": "dot"
+}
]
}
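The added edges mirror the new dependencies entries one-for-one: every from/to pair connects a prerequisite challenge to the challenge that lists it, and both endpoints use the pytest node IDs seen throughout this file. So the edges array can be derived mechanically from the nodes array; a sketch of that derivation (not code from this repo):

// Sketch: rebuild the "edges" array above from the "nodes" array.
const pytestId = (name) =>
  'agbenchmark/generate_test.py::' + name + '::test_method[challenge_data0]';

function buildEdges(nodes) {
  return nodes.flatMap((node) =>
    (node.data.dependencies || []).map((dep) => ({
      arrows: 'to',
      from: pytestId(dep),                                   // prerequisite test
      id: pytestId(dep) + '_to_' + pytestId(node.data.name), // id format used above
      to: pytestId(node.data.name),                          // dependent test
    }))
  );
}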
File diff suppressed because one or more lines are too long
@@ -57,7 +57,7 @@
"dependencies": [
"TestWriteFile"
],
-"eval_id": "261ccfaa-02a2-4c1a-8a56-c76c66f7dba1",
+"eval_id": "f219f3d3-a41b-45a9-a3d0-389832086ee8",
"ground": {
"answer": "The content of output.txt should be 'Hello World!'",
"eval": {
@@ -95,7 +95,7 @@
],
"cutoff": 60,
"dependencies": [],
-"eval_id": "81b64bf9-2b6a-4ac8-bcd2-8bfe36244ac0",
+"eval_id": "021c695a-6cc4-46c2-b93a-f3a9b0f4d123",
"ground": {
"answer": "The word 'Washington', printed to a .txt file named anything",
"eval": {
@@ -134,7 +134,7 @@
"dependencies": [
"TestSearch"
],
-"eval_id": "525001ed-8b45-4405-9e56-ce4423314294",
+"eval_id": "cd96e6b2-779d-4a4a-8367-d520023e27ae",
"ground": {
"answer": "\u00a325.89",
"eval": {
@@ -171,7 +171,7 @@
"dependencies": [
"TestWriteFile"
],
-"eval_id": "6390114a-531d-4743-a51b-50ba6ff8da43",
+"eval_id": "0bb23182-b434-402b-a73e-9c226469b959",
"ground": {
"answer": "This is a Heading\nThis is a paragraph.",
"eval": {
@@ -207,13 +207,14 @@
"color": "grey",
"data": {
"category": [
-"scrape_synthesize"
+"scrape_synthesize",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestRevenueRetrieval2"
],
-"eval_id": "18b14805-ff33-4076-9fb8-1e4218136f05",
+"eval_id": "1758058c-f726-484f-96fa-f05e278e5ff5",
"ground": {
"answer": "The twitter handles of the two hosts of Latent Space.",
"eval": {
@@ -252,7 +253,7 @@
"dependencies": [
"TestRevenueRetrieval"
],
-"eval_id": "a0a27778-aec1-4b37-8fc2-92feedffd3fb",
+"eval_id": "552bdf23-db40-4bd1-b123-4ed820886cc1",
"ground": {
"answer": "15 Millions\n112 Millions\n117 Millions\n204 Millions\n413 Millions\n2,014 Millions\n3,198 Millions\n4,046 Millions\n7,000 Millions\n11,759 Millions\n21,461 Millions\n24,578 Millions\n31,536 Millions\n53,823 Millions\n81,462 Millions",
"eval": {
@@ -298,13 +299,14 @@
"color": "grey",
"data": {
"category": [
-"scrape_synthesize"
+"scrape_synthesize",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestBasicRetrieval"
],
-"eval_id": "af95fa96-a5cb-42b0-98f3-715e6e7d0b5d",
+"eval_id": "dc2114d7-1597-4c9b-bed0-a97937ad977f",
"ground": {
"answer": "It was $81.462 billion in 2022. In millions the answer is 81,462.",
"eval": {
@@ -334,13 +336,14 @@
"color": "grey",
"data": {
"category": [
-"scrape_synthesize"
+"scrape_synthesize",
+"general"
],
"cutoff": 240,
"dependencies": [
"TestReadFile"
],
-"eval_id": "76e4c56c-8d57-423e-9cc1-1fff5f58dee6",
+"eval_id": "895ae28a-4513-44ea-a872-0164771d1597",
"ground": {
"answer": "A report highlighting elements from the 2 files.",
"eval": {
@@ -12,6 +12,12 @@
"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]"
},
+{
+"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]"
+},
{
"arrows": "to",
"from": "agbenchmark/generate_test.py::TestReadFile::test_method[challenge_data0]",
@@ -78,24 +84,42 @@
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestTestGetInformation::test_method[challenge_data0]"
},
-{
-"arrows": "to",
-"from": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
-"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
-"to": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
-},
{
"arrows": "to",
"from": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestRevenueRetrieval2::test_method[challenge_data0]"
},
{
"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestSearch::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestBasicRetrieval::test_method[challenge_data0]"
+},
+{
+"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
+},
+{
+"arrows": "to",
"from": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]",
"id": "agbenchmark/generate_test.py::TestSortCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
"to": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]"
},
{
"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]"
+},
+{
+"arrows": "to",
+"from": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]",
+"id": "agbenchmark/generate_test.py::TestCombineCsv::test_method[challenge_data0]_to_agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
+"to": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]"
+},
+{
+"arrows": "to",
"from": "agbenchmark/generate_test.py::TestLabelCsv::test_method[challenge_data0]",
@@ -117,7 +141,7 @@
"dependencies": [
"TestWriteFile"
],
-"eval_id": "261ccfaa-02a2-4c1a-8a56-c76c66f7dba1",
+"eval_id": "f219f3d3-a41b-45a9-a3d0-389832086ee8",
"ground": {
"answer": "The content of output.txt should be 'Hello World!'",
"eval": {
@@ -155,7 +179,7 @@
],
"cutoff": 60,
"dependencies": [],
-"eval_id": "81b64bf9-2b6a-4ac8-bcd2-8bfe36244ac0",
+"eval_id": "021c695a-6cc4-46c2-b93a-f3a9b0f4d123",
"ground": {
"answer": "The word 'Washington', printed to a .txt file named anything",
"eval": {
@@ -187,13 +211,14 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 150,
"dependencies": [
"TestUrlShortener"
],
-"eval_id": "54c3d7e9-71d6-476b-b045-cf0aaf118f95",
+"eval_id": "504b1648-e14a-4982-8b27-074598eb4fd0",
"ground": {
"answer": "The correct python file for a TicTacToe game is written",
"eval": {
@@ -227,7 +252,7 @@
"dependencies": [
"TestThreeSum"
],
-"eval_id": "0823b577-64f2-477b-856d-16726fe464b0",
+"eval_id": "ac75c471-e0ce-400c-ba9a-fb72aaab444f",
"ground": {
"answer": "password_generator.py is created and satisfies the requirements.",
"eval": {
@@ -255,13 +280,14 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 90,
"dependencies": [
"TestPasswordGenerator"
],
-"eval_id": "6ace62be-6c18-431a-947f-72fb20984b58",
+"eval_id": "029c1e6f-2b36-451e-bca6-60063b827d2e",
"ground": {
"answer": "The correct python file is written and organizes the files accordingly",
"eval": {
@@ -289,13 +315,14 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestWriteFile"
],
-"eval_id": "29a10990-2584-4602-8b9d-c217f6edbc4f",
+"eval_id": "a1ff38a4-1032-4bf2-960a-3b927f9936f4",
"ground": {
"answer": "The three_sum function coded properly.",
"eval": {
@@ -327,14 +354,15 @@
"color": "grey",
"data": {
"category": [
-"coding"
+"coding",
+"general"
],
"cutoff": 90,
"dependencies": [
"TestTicTacToe",
"TestReadFile"
],
-"eval_id": "5a32418d-1c3a-4af1-8dc4-8d4c29bed21a",
+"eval_id": "4d613d05-475f-4f72-bf12-f6d3714340c1",
"ground": {
"answer": "The implementation of battleship that passes all the tests.",
"eval": {
@@ -366,7 +394,7 @@
"dependencies": [
"TestFileOrganizer"
],
-"eval_id": "41ca1035-ceca-4e0c-91ab-66ed0b350273",
+"eval_id": "8106fd7f-83fd-496e-9513-280f4a3f012c",
"ground": {
"answer": "The correct python file for a basic url shortener CLI",
"eval": {
@@ -401,7 +429,7 @@
"dependencies": [
"TestSearch"
],
-"eval_id": "525001ed-8b45-4405-9e56-ce4423314294",
+"eval_id": "cd96e6b2-779d-4a4a-8367-d520023e27ae",
"ground": {
"answer": "\u00a325.89",
"eval": {
@@ -431,13 +459,14 @@
"color": "grey",
"data": {
"category": [
-"scrape_synthesize"
+"scrape_synthesize",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestRevenueRetrieval2"
],
-"eval_id": "18b14805-ff33-4076-9fb8-1e4218136f05",
+"eval_id": "1758058c-f726-484f-96fa-f05e278e5ff5",
"ground": {
"answer": "The twitter handles of the two hosts of Latent Space.",
"eval": {
@@ -476,7 +505,7 @@
"dependencies": [
"TestRevenueRetrieval"
],
-"eval_id": "a0a27778-aec1-4b37-8fc2-92feedffd3fb",
+"eval_id": "552bdf23-db40-4bd1-b123-4ed820886cc1",
"ground": {
"answer": "15 Millions\n112 Millions\n117 Millions\n204 Millions\n413 Millions\n2,014 Millions\n3,198 Millions\n4,046 Millions\n7,000 Millions\n11,759 Millions\n21,461 Millions\n24,578 Millions\n31,536 Millions\n53,823 Millions\n81,462 Millions",
"eval": {
@@ -518,6 +547,43 @@
"label": "RevenueRetrieval2",
"shape": "dot"
},
+{
+"color": "grey",
+"data": {
+"category": [
+"scrape_synthesize",
+"general"
+],
+"cutoff": 60,
+"dependencies": [
+"TestBasicRetrieval"
+],
+"eval_id": "dc2114d7-1597-4c9b-bed0-a97937ad977f",
+"ground": {
+"answer": "It was $81.462 billion in 2022. In millions the answer is 81,462.",
+"eval": {
+"type": "file"
+},
+"files": [
+".txt"
+],
+"should_contain": [
+"81,462"
+],
+"should_not_contain": []
+},
+"info": {
+"description": "Tests if the agent can retrieve Tesla's revenue in 2022.",
+"difficulty": "intermediate",
+"side_effects": []
+},
+"name": "TestRevenueRetrieval",
+"task": "Write tesla's exact revenue in 2022 into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 billion)."
+},
+"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
+"label": "RevenueRetrieval",
+"shape": "dot"
+},
{
"color": "grey",
"data": {
@@ -529,7 +595,7 @@
"dependencies": [
"TestWriteFile"
],
-"eval_id": "6390114a-531d-4743-a51b-50ba6ff8da43",
+"eval_id": "0bb23182-b434-402b-a73e-9c226469b959",
"ground": {
"answer": "This is a Heading\nThis is a paragraph.",
"eval": {
@@ -565,49 +631,90 @@
"color": "grey",
"data": {
"category": [
-"scrape_synthesize"
+"data"
],
-"cutoff": 60,
+"cutoff": 90,
"dependencies": [
-"TestBasicRetrieval"
+"TestAnswerQuestionSmallCsv"
],
-"eval_id": "af95fa96-a5cb-42b0-98f3-715e6e7d0b5d",
+"eval_id": "bb6e0a4b-7faf-4aa6-a524-548cddbc2732",
"ground": {
-"answer": "It was $81.462 billion in 2022. In millions the answer is 81,462.",
+"answer": "The correct amount spent on utilities.",
"eval": {
"type": "file"
},
"files": [
-".txt"
+"output.txt"
],
"should_contain": [
-"81,462"
-],
-"should_not_contain": []
+"1861"
+]
},
"info": {
-"description": "Tests if the agent can retrieve Tesla's revenue in 2022.",
+"description": "Tests if the agent can answer a question from a csv",
"difficulty": "intermediate",
-"side_effects": []
+"side_effects": [
+""
+]
},
-"name": "TestRevenueRetrieval",
-"task": "Write tesla's exact revenue in 2022 into a .txt file. Use the US notation, with a precision rounded to the nearest million dollars (for instance, $31,578 billion)."
+"name": "TestAnswerQuestionCsv",
+"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
},
-"id": "agbenchmark/generate_test.py::TestRevenueRetrieval::test_method[challenge_data0]",
-"label": "RevenueRetrieval",
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionCsv::test_method[challenge_data0]",
+"label": "AnswerQuestionCsv",
"shape": "dot"
},
{
"color": "grey",
"data": {
"category": [
-"data"
+"data",
+"general"
],
+"cutoff": 120,
+"dependencies": [
+"TestAnswerQuestionCsv",
+"TestCombineCsv"
+],
+"eval_id": "b1bb61cd-3d09-4a69-bb2a-9dbb3c477589",
+"ground": {
+"answer": "The correct amount spent on utilities.",
+"eval": {
+"type": "file"
+},
+"files": [
+"output.txt"
+],
+"should_contain": [
+"1861"
+]
+},
+"info": {
+"description": "Tests if the agent can answer a question from a csv",
+"difficulty": "intermediate",
+"side_effects": [
+""
+]
+},
+"name": "TestAnswerQuestionCombineCsv",
+"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
+},
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionCombineCsv::test_method[challenge_data0]",
+"label": "AnswerQuestionCombineCsv",
+"shape": "dot"
+},
{
"color": "grey",
"data": {
"category": [
+"data",
+"general"
],
"cutoff": 60,
"dependencies": [
"TestReadFile"
],
-"eval_id": "db4654d7-fc97-4290-ab27-a710c2b5ce15",
+"eval_id": "d59ec964-6f67-4b3d-a4de-c4436fc76f95",
"ground": {
"answer": "The csv sorted by date",
"eval": {
@@ -638,13 +745,52 @@
"color": "grey",
"data": {
"category": [
-"data"
+"data",
+"general"
],
+"cutoff": 60,
+"dependencies": [
+"TestReadFile"
+],
+"eval_id": "9df3f07a-5047-488f-b788-1e1f57eba970",
+"ground": {
+"answer": "The correct amount spent on utilities.",
+"eval": {
+"type": "file"
+},
+"files": [
+"output.txt"
+],
+"should_contain": [
+"84"
+]
+},
+"info": {
+"description": "Tests if the agent can answer a question from a small csv",
+"difficulty": "intermediate",
+"side_effects": [
+""
+]
+},
+"name": "TestAnswerQuestionSmallCsv",
+"task": "How much was spent on utilities in total ? Write the answer in an output.txt file."
+},
+"id": "agbenchmark/generate_test.py::TestAnswerQuestionSmallCsv::test_method[challenge_data0]",
+"label": "AnswerQuestionSmallCsv",
+"shape": "dot"
+},
+{
+"color": "grey",
+"data": {
+"category": [
+"data",
+"general"
+],
"cutoff": 60,
"dependencies": [
"TestLabelCsv"
],
-"eval_id": "d5f04342-983f-45a4-b84a-fe8d96863375",
+"eval_id": "52467beb-b951-4356-9776-9a0ae46bb33b",
"ground": {
"answer": "The csv data is combined",
"eval": {
@@ -681,7 +827,7 @@
"dependencies": [
"TestSortCsv"
],
-"eval_id": "6c58e229-aa22-4c4f-a053-4a78931ad41e",
+"eval_id": "6e2bf1f0-6842-4704-8ed1-b17c2065bbac",
"ground": {
"answer": "The csv labelled",
"eval": {
@@ -712,13 +858,14 @@
"color": "grey",
"data": {
"category": [
-"scrape_synthesize"
+"scrape_synthesize",
+"general"
],
"cutoff": 240,
"dependencies": [
"TestReadFile"
],
-"eval_id": "76e4c56c-8d57-423e-9cc1-1fff5f58dee6",
+"eval_id": "895ae28a-4513-44ea-a872-0164771d1597",
"ground": {
"answer": "A report highlighting elements from the 2 files.",
"eval": {
@@ -0,0 +1,14 @@
+/**
+ * Minified by jsDelivr using clean-css v4.2.3.
+ * Original file: /npm/toastify-js@1.9.3/src/toastify.css
+ *
+ * Do NOT use SRI with dynamically generated files! More information: https://www.jsdelivr.com/using-sri-with-dynamic-files
+ */
+/*!
+ * Toastify js 1.9.3
+ * https://github.com/apvarun/toastify-js
+ * @license MIT licensed
+ *
+ * Copyright (C) 2018 Varun A P
+ */
+.toastify{padding:12px 20px;color:#fff;display:inline-block;box-shadow:0 3px 6px -1px rgba(0,0,0,.12),0 10px 36px -4px rgba(77,96,232,.3);background:-webkit-linear-gradient(315deg,#73a5ff,#5477f5);background:linear-gradient(135deg,#73a5ff,#5477f5);position:fixed;opacity:0;transition:all .4s cubic-bezier(.215,.61,.355,1);border-radius:2px;cursor:pointer;text-decoration:none;max-width:calc(50% - 20px);z-index:2147483647}.toastify.on{opacity:1}.toast-close{opacity:.4;padding:0 5px}.toastify-right{right:15px}.toastify-left{left:15px}.toastify-top{top:-150px}.toastify-bottom{bottom:-150px}.toastify-rounded{border-radius:25px}.toastify-avatar{width:1.5em;height:1.5em;margin:-7px 5px;border-radius:2px}.toastify-center{margin-left:auto;margin-right:auto;left:0;right:0;max-width:fit-content;max-width:-moz-fit-content}@media only screen and (max-width:360px){.toastify-left,.toastify-right{margin-left:auto;margin-right:auto;left:0;right:0;max-width:fit-content}}
@@ -0,0 +1,14 @@
+/**
+ * Minified by jsDelivr using Terser v5.3.0.
+ * Original file: /npm/toastify-js@1.9.3/src/toastify.js
+ *
+ * Do NOT use SRI with dynamically generated files! More information: https://www.jsdelivr.com/using-sri-with-dynamic-files
+ */
+/*!
+ * Toastify js 1.9.3
+ * https://github.com/apvarun/toastify-js
+ * @license MIT licensed
+ *
+ * Copyright (C) 2018 Varun A P
+ */
+!function(t,o){"object"==typeof module && module && module.exports?module.exports=o():t.Toastify=o()}(this,(function(t){var o=function(t){return new o.lib.init(t)};function i(t,o){return o.offset[t]?isNaN(o.offset[t])?o.offset[t]:o.offset[t]+"px":"0px"}function s(t,o){return!(!t||"string"!=typeof o)&&!!(t.className&&t.className.trim().split(/\s+/gi).indexOf(o)>-1)}return o.lib=o.prototype={toastify:"1.9.3",constructor:o,init:function(t){return t||(t={}),this.options={},this.toastElement=null,this.options.text=t.text||"Hi there!",this.options.node=t.node,this.options.duration=0===t.duration?0:t.duration||3e3,this.options.selector=t.selector,this.options.callback=t.callback||function(){},this.options.destination=t.destination,this.options.newWindow=t.newWindow||!1,this.options.close=t.close||!1,this.options.gravity="bottom"===t.gravity?"toastify-bottom":"toastify-top",this.options.positionLeft=t.positionLeft||!1,this.options.position=t.position||"",this.options.backgroundColor=t.backgroundColor,this.options.avatar=t.avatar||"",this.options.className=t.className||"",this.options.stopOnFocus=void 0===t.stopOnFocus||t.stopOnFocus,this.options.onClick=t.onClick,this.options.offset=t.offset||{x:0,y:0},this},buildToast:function(){if(!this.options)throw"Toastify is not initialized";var t=document.createElement("div");if(t.className="toastify on "+this.options.className,this.options.position?t.className+=" toastify-"+this.options.position:!0===this.options.positionLeft?(t.className+=" toastify-left",console.warn("Property `positionLeft` will be depreciated in further versions. Please use `position` instead.")):t.className+=" toastify-right",t.className+=" "+this.options.gravity,this.options.backgroundColor&&(t.style.background=this.options.backgroundColor),this.options.node&&this.options.node.nodeType===Node.ELEMENT_NODE)t.appendChild(this.options.node);else if(t.innerHTML=this.options.text,""!==this.options.avatar){var o=document.createElement("img");o.src=this.options.avatar,o.className="toastify-avatar","left"==this.options.position||!0===this.options.positionLeft?t.appendChild(o):t.insertAdjacentElement("afterbegin",o)}if(!0===this.options.close){var s=document.createElement("span");s.innerHTML="✖",s.className="toast-close",s.addEventListener("click",function(t){t.stopPropagation(),this.removeElement(this.toastElement),window.clearTimeout(this.toastElement.timeOutValue)}.bind(this));var n=window.innerWidth>0?window.innerWidth:screen.width;("left"==this.options.position||!0===this.options.positionLeft)&&n>360?t.insertAdjacentElement("afterbegin",s):t.appendChild(s)}if(this.options.stopOnFocus&&this.options.duration>0){var e=this;t.addEventListener("mouseover",(function(o){window.clearTimeout(t.timeOutValue)})),t.addEventListener("mouseleave",(function(){t.timeOutValue=window.setTimeout((function(){e.removeElement(t)}),e.options.duration)}))}if(void 0!==this.options.destination&&t.addEventListener("click",function(t){t.stopPropagation(),!0===this.options.newWindow?window.open(this.options.destination,"_blank"):window.location=this.options.destination}.bind(this)),"function"==typeof this.options.onClick&&void 0===this.options.destination&&t.addEventListener("click",function(t){t.stopPropagation(),this.options.onClick()}.bind(this)),"object"==typeof this.options.offset){var a=i("x",this.options),p=i("y",this.options),r="left"==this.options.position?a:"-"+a,l="toastify-top"==this.options.gravity?p:"-"+p;t.style.transform="translate("+r+","+l+")"}return t},showToast:function(){var t;if(this.toastElement=this.buildToast(),!(t=void 0===this.options.selector?document.body:document.getElementById(this.options.selector)))throw"Root element is not defined";return t.insertBefore(this.toastElement,t.firstChild),o.reposition(),this.options.duration>0&&(this.toastElement.timeOutValue=window.setTimeout(function(){this.removeElement(this.toastElement)}.bind(this),this.options.duration)),this},hideToast:function(){this.toastElement.timeOutValue&&clearTimeout(this.toastElement.timeOutValue),this.removeElement(this.toastElement)},removeElement:function(t){t.className=t.className.replace(" on",""),window.setTimeout(function(){this.options.node&&this.options.node.parentNode&&this.options.node.parentNode.removeChild(this.options.node),t.parentNode&&t.parentNode.removeChild(t),this.options.callback.call(t),o.reposition()}.bind(this),400)}},o.reposition=function(){for(var t,o={top:15,bottom:15},i={top:15,bottom:15},n={top:15,bottom:15},e=document.getElementsByClassName("toastify"),a=0;a<e.length;a++){t=!0===s(e[a],"toastify-top")?"toastify-top":"toastify-bottom";var p=e[a].offsetHeight;t=t.substr(9,t.length-1);(window.innerWidth>0?window.innerWidth:screen.width)<=360?(e[a].style[t]=n[t]+"px",n[t]+=p+15):!0===s(e[a],"toastify-left")?(e[a].style[t]=o[t]+"px",o[t]+=p+15):(e[a].style[t]=i[t]+"px",i[t]+=p+15)}return this},o.lib.init.prototype=o.lib,o}));
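The file above is the stock minified toastify-js 1.9.3 bundle; the option names visible in its source (text, duration, gravity, position, close, backgroundColor, and so on) make up its public API. A typical call looks like this (values are illustrative):

// Typical toastify-js 1.9.x usage; every option name below appears in the
// minified source above.
Toastify({
  text: 'Build updated',
  duration: 3000,          // ms; 0 keeps the toast open
  gravity: 'top',          // 'top' or 'bottom'
  position: 'right',       // 'left', 'right' or 'center'
  close: true,             // adds the dismiss button
  backgroundColor: 'linear-gradient(135deg,#73a5ff,#5477f5)',
}).showToast();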
@@ -12,31 +12,33 @@ const RESOURCES = {"canvaskit/chromium/canvaskit.js": "ffb2bb6484d5689d91f393b60
"canvaskit/skwasm.js": "95f16c6690f955a45b2317496983dbe9",
"version.json": "46a52461e018faa623d9196334aa3f50",
"assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "055d9e87e4a40dbf72b2af1a20865d57",
-"assets/AssetManifest.json": "941c48d6044174e40e20a16f13d7f9b8",
-"assets/assets/tree_structure.json": "72c209a47ed760b21bde2692c822235a",
+"assets/packages/fluttertoast/assets/toastify.js": "56e2c9cedd97f10e7e5f1cebd85d53e3",
+"assets/packages/fluttertoast/assets/toastify.css": "a85675050054f179444bc5ad70ffc635",
+"assets/AssetManifest.json": "1b1e4a4276722b65eb1ef765e2991840",
+"assets/assets/tree_structure.json": "b02bcc4ece919ab87881adf1fa93a64a",
"assets/assets/images/autogpt_logo.png": "6a5362a7d1f2f840e43ee259e733476c",
"assets/assets/images/discord_logo.png": "0e4a4162c5de8665a7d63ae9665405ae",
"assets/assets/images/google_logo.svg.png": "0e29f8e1acfb8996437dbb2b0f591f19",
"assets/assets/images/twitter_logo.png": "af6c11b96a5e732b8dfda86a2351ecab",
"assets/assets/images/github_logo.svg.png": "ba087b073efdc4996b035d3a12bad0e4",
-"assets/assets/general_tree_structure.json": "53a2792cf5c9639acebcb6da22dd6464",
+"assets/assets/general_tree_structure.json": "8ccc68dcb450997d90725263cd434f41",
"assets/assets/google_logo.svg.png": "0e29f8e1acfb8996437dbb2b0f591f19",
-"assets/assets/data_tree_structure.json": "6b200706f582fdce35d8291a73356b65",
+"assets/assets/data_tree_structure.json": "5345cbc65d032f9493efbee7a6db4f18",
"assets/assets/github_logo.svg.png": "ba087b073efdc4996b035d3a12bad0e4",
-"assets/assets/coding_tree_structure.json": "608b5921008860bf1b618a660caa1cc2",
-"assets/assets/scrape_synthesize_tree_structure.json": "a29d67be3d3ec004d1dbf3910a0f2e19",
-"assets/AssetManifest.bin": "df613c1ae706e7c77498439374d0a5c9",
+"assets/assets/coding_tree_structure.json": "55035cbc2632b71b7c918098a18f38b1",
+"assets/assets/scrape_synthesize_tree_structure.json": "1643d505221a468f5ff13563c095d12c",
+"assets/AssetManifest.bin": "791447d17744ac2ade3999c1672fdbe8",
"assets/fonts/MaterialIcons-Regular.otf": "9199d816d4b165fcbc657fba7c056a46",
-"assets/NOTICES": "f2257e7a0f0782ee24dc25ae2060fbb6",
+"assets/NOTICES": "3ae4fa6452f95f6c20d11bef16c75e35",
"assets/FontManifest.json": "dc3d03800ccca4601324923c0b1d6d57",
"assets/shaders/ink_sparkle.frag": "f8b80e740d33eb157090be4e995febdf",
"icons/Icon-maskable-192.png": "c457ef57daa1d16f64b27b786ec2ea3c",
"icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
"icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
"icons/Icon-maskable-512.png": "301a7604d45b3e739efc881eb04896ea",
-"index.html": "7e34123fae1c5935160cb7f4230b1212",
-"/": "7e34123fae1c5935160cb7f4230b1212",
-"main.dart.js": "a0b37f98b0cab2f8b358da834cead394",
+"index.html": "b2e0c1b13df1619051fcc9e372e24f3a",
+"/": "b2e0c1b13df1619051fcc9e372e24f3a",
+"main.dart.js": "431df62d626de1a5adc5967d22002537",
"flutter.js": "6fef97aeca90b426343ba6c5c9dc5d4a",
"favicon.png": "5dcef449791fa27946b3d35ad8803796",
"manifest.json": "0fa552613b8ec0fda5cda565914e3b16"};
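The RESOURCES map keys every build artifact to a 32-character content hash, which is why this hunk is almost entirely hash churn: the generated Flutter service worker compares its old and new manifests on activation and refetches only entries whose hash changed. A condensed sketch of that comparison (simplified; the real worker also manages TEMP and CORE caches):

// Simplified: which cached URLs need refetching after an upgrade?
function changedResources(oldManifest, newManifest) {
  return Object.keys(newManifest)
    .filter((url) => oldManifest[url] !== newManifest[url]);
}

// With the old and new RESOURCES shown above, the result would include
// "assets/AssetManifest.json", "assets/NOTICES", "main.dart.js", ...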
@@ -35,7 +35,7 @@

<script>
// The value below is injected by flutter build, do not touch.
-const serviceWorkerVersion = "3957695581";
+const serviceWorkerVersion = "2709162130";
</script>
<!-- This script adds the flutter initialization JS code -->
<script src="flutter.js" defer></script>
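serviceWorkerVersion is stamped by flutter build web; the stock flutter.js bootstrap uses it to register the service worker under a versioned URL, so bumping the number is what pushes clients onto the new flutter_service_worker.js. Paraphrased from the standard Flutter web template (not necessarily this repo's exact code):

// Roughly what the stock index.html does with the value injected above.
window.addEventListener('load', function () {
  _flutter.loader.loadEntrypoint({
    serviceWorker: { serviceWorkerVersion: serviceWorkerVersion },
  });
});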
File diff suppressed because one or more lines are too long