From b0c6ed999c30279974926ba8a97e2c891f0b8e25 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Tue, 2 May 2023 11:07:50 -0700 Subject: [PATCH 01/56] Feature/tighten up ci pipeline (#3700) --- .github/workflows/ci.yml | 2 + .../test_local_cache/test_get_relevant.yaml | 497 +++++++++++ ...ve_memory_trimmed_from_context_window.yaml | 338 ++++++++ .../test_memory_challenge_a.yaml | 466 +++++------ .../test_memory_challenge_b.yaml | 771 ------------------ .../memory/test_memory_challenge_b.py | 5 +- tests/integration/conftest.py | 5 +- .../test_browse_website.yaml | 604 +++++++++++++- .../test_write_file/test_write_file.yaml | 223 +++-- tests/vcr/vcr_filter.py | 73 ++ 10 files changed, 1851 insertions(+), 1133 deletions(-) delete mode 100644 tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml create mode 100644 tests/vcr/vcr_filter.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95a04168..a08ecb02 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -72,6 +72,8 @@ jobs: - name: Run unittest tests with coverage run: | pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term + env: + CI: true - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 diff --git a/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml b/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml index 8bd34929..8a11796b 100644 --- a/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml +++ b/tests/integration/cassettes/test_local_cache/test_get_relevant.yaml @@ -497,4 +497,501 @@ interactions: status: code: 200 message: OK +- request: + body: '{"input": [[18031, 1495, 220, 16]], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '99' + Content-Type: + - application/json + 
method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaS6+zOrel+9+v2Nrd1KcQQvDM7nELECB2EnItlUpACOEWrjbgo/PfS+Q9OlXV + WQ0WS3jZnmM+Y9j/8a+//vq7CrM46v/+56+/i7Tr//4f87NX0Ad///PX//zXX3/99dd//H7+f2/G + ZRi/Xuk3+b3++2X6fcXj3//8Jfz3k//70j9//b0V0oz2TVyG/Rt/ZNCN/sr2Me7CatyjC9C+y5mX + y43OQnuxAIvnCnstoOdMy07C9hEVhJkt2odNlyJNVtPEIEoy1f743G1jZLXFlxxOSA8nvUg1eOgL + D6/UpEFVYO8qqKnns11X2uEU90xB2Ft4zPyMSGcv90Ihft0eeDxtwR3erNbgvRZXmEq31O0M8w5I + JkRhxjHIEe8+Y7d9RWbIdlbj+NPo3A2Quk4le1l0UBc5bQCvLtjjplK3qPWm5xE1kkGYHTeWz+3O + yWFydYrlGuxwuiw+JoTHRcZ2TVRl6Td8OhAk2oUYtvvRJ+kSd4jcHcRUPbfCro6bBnxjonSpJE04 + tCqWUXROMbOc3spYFY0ShI+FQry1v8jGYv2IUOccQ+YIx2M4Tu7FRAGx9ng7VPeQuyf7CJcSZKJc + 4jCkyzhQQLqEiLkv7x32RskqSDaPhOzS85ixM9I0VHmKRZ6SmIaDH0kNdHvoyQGg8CfxNFzg2wYt + sfB0Caf2k9sw+ReZWXe15MMldkVkyreCkbF1EdWN6w1sj+gMq2qC+nCfmtsXdEDw/rjiUzLYkYxF + 3GGBWY9sqCK7hGp1/hLvEqUtX/tPE/L11SRe3GLe093uAfhauhSx4hSO6Gk04G/ZG6/XdNIn7g3J + dp5fpmwDi0+X+JujJOFftrdKMRyXb/8GYV2UdHVdqLoYWE8JTvtaJkZQn3Q+uSiF9zfV5vlC4bgR + 8w7sbpWzgzC926HcVBWoaikTbJk5olZTi0BisSE7NWn4KBC7g2TcXdkeENG5nlcUkmo0iKkbj5Cr + KTkix7oBXlzflc/Kpw1QqkTDqA6VbHSPiQif9H5m1nlN+HjX3x36jU87RZnLyfr1AMaOhLz7j8BH + fE0aeFzyHi8sU3RHvbjEEPeLDVGOzyWaKmhsKJ/ZmR0ih3C+eD06cEfxQtSpydGkfrYBaK6tsVhV + Fc73/XWA9Vk0yb56We3gHO8lJMmQ4rpLM7c5+JcBmnd0ZF61vPHaYZEBBCdA9M/lGw5xr5+XU7uQ + fvvRr4aqOUN2WXvEWJ0sfdUohxy2vqQRUkeV2x8fWgfJ8bVhujfmYf2muYbe30RjhH22s36sG/hm + jz3Trh/G+3qbxZs+j3Z46M0NYqwcE5CxfGAEraKMZivlCNwo15jN9UPH5lGCflV12i43Q8bHhk5w + 501M5Wx68knTzQgu8QWRpziysJNW7gLtd+OJ4ckq+dS8NQPOYebP+mCgIe7dMyruyy87VDHT6WlT + 5CCevpgQLTmj+pHnDxQeISNeuVrr1Uk+drJbe1fiFjfR791RtGXnXjXE2Nx8LsjrD90GZLfHWyIe + /eEbjBTmeifa9UMQx6KGwZFOLp3IOfPH3XfyUH9x7kTvs1bnlrMpIV7M9dIbk8/ITTFBoFPC9C0U + fOLdVIGiqISYhViEw8M4BttPc2PEXTPb56wcU8jXF5Xdj2sr5CtW5tAL45aZ1l5r1ytS2UALZtFl + /8naSjwdJWgsd4dHWViEdIsmE+J+uaCLd73Rqf86mlAZWkOceyxnY5d+HOi044Io62OlTwK5yRC/ + WhcvV8PXn9zjRKG+Sjr76f3gHN85fM7am3nOQWq7LdTpn/Fg103C6aefwyMT2KwHqLe7SZPDsTDw + 
8m0nLsUroQL340lErZLCp2hz1GCXxA1RDCdyuy30IlzW54p5zJBR4w5SCoF0ydm837OBlkayrV+b + hpHi7mUUbaIbuu33Jt0cVqeW7ftnBMiOQ2YefN4OZ9RK6Js9Me3d7hnyNzk0m76Lv8wtbjt3/TSP + JTRSqGGx2yltx6znGY7jpWDEKAo0nuRSgq829Xg1tBnqfnpLzDRgankrWp6exRLC2pcwum3PfBwb + V0OhUq3w4nA02sZhnwbm/kGjRv74g75TYjDle8P2Ec4Qc0/2GV764OLt7rsK2c4ORfkl0IA5Ltm1 + 6zfZ3+Sz4BywKF88NIz7dYXMVLvO+1nNRuniCSDa4JDL7W3zyX89jM2oRhFxN8IWdc+ifqCcKStC + vveM05RvKTiWisk+MK5hf1m8BfTbj/YleerT4dJW6L0WVsR9e2LGX+05AV58TEKOm8Kd+tzEaOvm + Ijv7kZt1j/wmwnMxhhj4/uYPYX2+wOPynZhGjTMa8bWq5HMmvglZD4z/9Af+jGeu//YodWfZIfpA + L7nk+d3yrUaQm6bBcMYHd0DSkKO2vp/IzA++cNyoAizvekqHZJKzCklDiSpPs4gpP59ZubnWRxj3 + 0YKQNhqyuf85IK2UMwn03ZL3xfoRgxC/1mRXRYk+bmFNgS6UM9H8CHi/EasHYNHrCOFt5tLL4mOA + Oq2ALs0m1PubZmMQn3n72+/+9CwVEZhZnXCyPj79wfbKCk7N8USiZFD5cPsMMZymMsVTu1zy8hzt + vd96M5VwVx+W730MpxOo7DJ6bdsno2lCFxSYShEDPsrrtQxUvFfEcw6Ptm4SqwPfi154MXl+xhev + l4LOgn1gh4P4RaOqCTnUi/UGb+7L0c3iwxTD3elGds3vVz6qHyag52vlMWVzfWXd3M9l9j1OTK2S + WzucgkACTQi/RMsGh4+77yJFSrKM5/FEaHzkiQHZ1ZaJwb0UTW3yOkNRHY/kWt5798/6irX0pOW8 + HnwL+gLk++dEzEop9NZyVjK6Vt4aC+cgzrjxdQY08w6F3vi4X94r3lZVv0ss+o/SZ+7JKOGLuicj + t8VeH2lhJdvNoTqS8ChRTu1WogjQWafFzJeMmYiis+Ce2T7a2z4fa7+EUn59Zj5YttU3GDvohUEi + 6q6O+OBNyIH4WtVMlYQV6t1jJaJw/BrM3txDf4jpLQIviM8EyzfnD89Bdy47Zh/DLuzxNYcf79KN + +lHdNZJ8DdzjxsEbqyz0aqw3NnTnYmRqGU7+9NPjmU/oamh1Poz1Kob5/8EC0176cLxLAiiPyibK + Lf74VKBqgtbntUsTphr+lPqohG6/6Gc9lrN+fR4rlF1Ei3jpELZDIZ0mVJDqRFyz9tvxWfDjH57U + sViFw+Q6GOVx+GSO/3y7QxUpJfTh2mXKJXbcUTw1E9BSW9HWqKNwbD+3Eha7vfGnf3KjQDKKD1pD + Dm5X6ZPl5AoE395mdsLalsl3R4PbZHK8kldC21TQODDvL6K/yMvlb6bHgMXDSBRmnvQJ5AxDpOGE + LuZ6Gh3yBFR5ukflZ1W7dGqzCcxGW1MxG1W3D+x3Kn/Oas4IFhI+xDSO5ePmarH9SY7Dmbcq8M7x + Co9WLei8ywwZiTK26Cc9hf5UrNMLGuUOYfGe4mwE+VXJd40SPIzuKqSltMFo3A8dOQj9xGlYiwYE + lSLi6pJ8EU8nyYD6UK7/1O8Y2O8EhKWB2eGxy3SeHKMUXvrk0mnx6rMuPuxiMJ9sT1FfBPqYXaQS + eJ9mzEj5Jhy1/FCioFIRXT71g88H2ynRrB9Me5ZtOIpjVcKzfLnEuLyTkF+VvQwPd0GZM7gXNEFQ + G+jHB972kfjjRkwCaMc2xZvhS9BglNdh80kvB2Lu9tBOOxsf0UHYCmRPVhWf9fqGdknUkPtye0XV + 
5DoeSgHbZBfYe59Obl0hcfIapoyu4Qq/+tbAOGH0tD4he2RSBcjmmBxi5xGOhll7IOU0x8LMk9P1 + Ldto5gOMLtsmzNW0SuGQXzBRbhl2h199WFzmbH9d2m0/tbEH7LY1iHvXaFjZXWUj89N9CenLYzjQ + UvHgKfIFleb1mFbHOoajYlTMQlIQ/qnPKEgHcuiyJuPZ9aEAuQk2s9Fd9Yefn3SP8g73KeG8yO+3 + BXrp8YsmyRS0Ne8KE4S1OWL+dqx2eO7aEtTTpsddOqCM3dTHBME9uOJ1caWIm3WVgua/H1S8v1Q0 + KIpSbU/PbUW8ZrkMO/F09SDrXw51LAeFlZrmAoQbmTDHchp3UpTE2L5vUkm822eN6BvzAZrBmP7w + ItfzpINZf//0m8ZqPgIsv/cjXaIVtFN6rgK4PSOBXa+Lsz/peWgjL4jOTHUpafk18RT5lQcd21Wf + WB+1fH8GfVXnRPsMms/7PBxQOEoTca/ajnf7nt/QZldLbHddfFze6dqwvWvsQjdD1/r81RUU3VGt + 0FV6VsIJr4Yb+N7k0tXUdj43606El9C/mSb6YtuN+2UF5jO44SLQD3y0WhajKq9MhvduMPPiyYG6 + THJyiBzG6+uy7NAYGMEffuoPl+cE7ecqMC8ZPohKwhGDeyLbP38/Ro4EqKaZSexj+MrG0dl5f75v + 7vatPy6XJ3v76wdzf+TDZ7IeMOsLXm+kE/rDs7NfYZqAPz4Xj+gC+a1vmAPBN+TfkN3kMbDedP05 + cp9aDbXRbpgeVDDcyWeRoyYgKstp7meHOV/Af/iZON/1Kqx/439svZZYfa6GK7xiJlJspaMcjx7i + SHpfNrNfZGFxK9y5fjAMivcmeO/KPq1fyNsYa8+Z9+8nbMn6GkCoNCs8uYeNTu9aYELqLI+4EY/f + rLtnbgPukwRUXDxtf5j3G7q3s19d8wVn7ugEMOc1M/8WLpcE+vjDY0ZEc3euPwy9mz7oUK4P/zXe + 3/d30WHBqx8f3FQnJ2pgfRCN+6aDxSLM6OIUxPp4XTYBmM9nyHZEOLfD4iXKKOvjHR53NfBGoPsU + Fovd6sf3nIvDxv75U1rN9bmaeQfNvMBs75TP/NU/JA89PniNJNlnxX0yQA2wREXdkMLpwIMc9jQk + eOYbNCZskuGe3Vd49KUyo/ZBHeBJ44o5xdrjdRMPGGXFq2d6G5d8LNaXSQ4/5xM7zHnX6JD4KM98 + hLnTJe6U8vHxJy/AL9q51awfcErPAlNjD7fTyztREEwnpWMTftyJmaWNDt1tmPOWhzv2GdyQ/E4o + wcKpzKZnaYvwFKcN2zevozuo2leBmV+I9XLLlleoEv7kY1g8MX1wmONBl8oBs/GtcodOew2QHGOB + 2KlA2ukzpcm2PhRAisDeh0Ng7y8oFn1n5uXnzIfHDk4yWmO+mjCnuehHqCD1bTacNOyS42RAq1BM + 1+c1QcXcDyH4MptWhtW7/OUeg+0lrunsV3do9ZufFMiBNpYThnR07uavXggxmilj6NmZaHGNJ3y9 + L+pfXpSDheQj209fyx2execBfv/oyN6XjXA918talddPvNDV2p/iQ5Jv1619I7jdWvpIy1sAxX2R + 0q3QT6jnXrqAA9QZ0d/OA7HlgmpAWGTSz32buj0rAxOW62pB7NEu/dnPSr/+QnaBfkB0dM7DdlTj + CKfwuoXjN7QkuFZJwch5s+CTvz1jiPvtghj4mrR0zqsgaoSY3Xb7NhxzUZ39/KljP75i2WXvyL3A + t3QVH0S979Iihra+eczKrmu/vyzuAhJr+YmHqO/5j59AZqsPcT2uubQCGsMquXnMQDcIp0vMcnnO + K/FiZ8aoDquThnYP50mXL7tCI90dAjkzspg+H4bHh+Y9pkDe7gtfluGaD298VH55xpy3PttJupwf + 
21lviKIoPp9+fLNu33s6BpbKp8XLimDbHe50Nc8PNyxjAe3nwmjdpYJPn+XBQeT7omznb7A+sK8f + gAahwQ7idOMs+H5ypGWjiUHTj+H0OV1FcI/SbvZTERqWS7eEfRn0zNY0e85nrByWyypkEXpUnEWN + XsmBZXyJs9uZOpeE8iGb8qUg3jmKfPqZSkGWmRhh2X/2vHOO9xzyr+IzVdqMPj1cvgsUvB9ryrYy + a/lBWFdo+b25TDucPXfcfWUMeHd+MVd5p5xBIC8gK949lWxvwzndVSlIhmUzd7nwXFHAege3D9/R + IZ2SlgdWOyBsLBjBrquEs5+LZPuSErbbM9yi+CDHKFH2Jv69T1mZAXRBuWdeuSr46LCLieSvsCQ/ + P8FXrDmjYLhbs94c0HAKzjLsd/xEn7uy573lfrTtrH+YsU+nj636sGHOswjJzlU4sG/4AMXWOrqJ + 8ErvvRNoaL+gWzzMPL1qEkJlvPD27OCjrdu/OnxE7icp8ZQNoBfyfRNBH65cmnaG4w9BRSa0zb0V + 89RFlvH0vMhBSeBJ7hvpxPldyxYQof1p9oc7f8BX30DO+/Zku7m+Z32hkF33y7n/btvx54cGm6gU + ae/WHyVBq375Kk6z1bqle3oekHFYMOK8+97tfvtj5klix42UdeLpIcNLj15MvyrPcPit90Hv10z7 + DH7Gm+VzQrM/ZVq2Wmf0c3qmKJCuNZYuydPldJekW1Q/2bx/Wref/aJsrA+E7R72pPOXxyREd2ZJ + XC0veccP3RE2iyKnm/Khh/QUiBU6w16g5ewP1s8yoLB7i/bcL2/h7BdttKcP5+evMnZVVAk2u4bj + 2a+jYs5PkZR3OXOP27AdA/ueoP3NXJKfHrCxaSJQHo1N61351me9vqHCajeMrAeCRrTBw+97WHjk + vU4FEkuwZLces1BfIT7nfSjZPAs8Ds0hEyKnfUjLu5rS4yUO/SmqxwFo0WnMfhRZuDrJaQn9ZR8R + S8k2/q+foUayLgS/qdCyrxwfoXm/r/hT3kRO40Mnozk/YcrqOCAuTt4NBOwaxHa80e3v2StBW1/W + mJ1vtLbEVzkBRdGc+XuXjLFyK6D4dXmwOU92p1wMI+h9sZ75oc9GJYkqNIHf0lm4M2q3W01ORutK + TGWpuzyspAhdrfKGl+fNoRU7bZDBChYq1dfTOqQrci7/8JQDgeWP+T1ewJ1XMRbDb+MO5FaD7EhH + k/jhznPHgxDKsAWRzzyr6iwV9DO8cRFQHu48nZ82ywjUFN8Imf3X1CbXIyx6rjE3O03udLhkDehX + XWd7tx35aJgfDMtlExIjl7pwMkxkyrLk9HgjdmnGdtXpj/8kWFf3IQ/tBfx4m7l3DYczL6bbXl9R + 5o7NKus1Q49APUkVC/xnj8ab5qVQXW4PdpDWe9TL66UEwvrxnvOjN++/IZHQzkq3zGyUmDN8Gb3t + XuwTvAJZQpz3XgpWsDUJplrr//J2ac6TiNuGtVvssW5u5/n+k1+uDcsDuJhKS7SwTNtJ0x0bzf1+ + 9uejW7w8w4Elph7TvjLXO4lLE+q08wLnpiuj+o1rGTnvxmJ/8orP6SqAQ+7xnGcabntTrwaUz8+Z + uH0hu2yo7AeAc11QqVqwkHWZIYFskYzhZ57xoV5Ukazl1oYuzQL70z1jwS8PYfvykfl96pfz+SJ4 + tHx+P20X9/pxu6tii27F3tGn5bvHv/MY5hm27k/LZauhP/5MvsT6EJTSGbYg8Nl/IZ/vqmcl42vu + kr3bnlD1y39n/WNXqwnbxj5gEwqrEmi3fR3csYkHD3pfqJkqMsVfR43ewHx+hYO9o7b0+Die4eBS + gsWD77eTeJIuYD4fNzznle3wLOoAAumW//RZp/eMPcBCZfrLN0Nq7h8C6vUEEasInLDXd8Hll8cQ + 
/YhefAj3lxTIN6qJQ80hG99ka8BtCkuG/WDl99toVYJb4ytxTs8i7AViU7QjSfBbT737nccqm3M9 + 84SNBjVNEuC9tyKG6cpzHhBXoBzbAW9i/Mq6y0JtYKsXT7ZfIDPkdaxM2+sgiOyyOn31cT6fRPN5 + Mh0qSMLx59/9PhyJeU3X4XzeKIDfP0dmcE/j1GH9AubzT0IcJrY9+/bnTeqASwJqHttJfugXae6P + 9HPaRrNef+Ufv5M5L+e0fnEPdI9O9N1GQ9tnF5DhvVzf8ST6t5anEzzQ8q0syVUcNm5/Cqpo+zt/ + FF8dDce5P6JkExZ0FNmY8V11BUiSKcVii+qQ72xfhHuo50RV31lYP3ejg3Z3sWWkjjYh7z6iBuFj + u2MHeSqy6eXdNPjtH0y1KJu+6J5DpB0KYqdM0KebqmNAddDgiRqfbGqTxwJ20iDTJW91V/ydhymP + S8C8qDXD1U+vLvEN4c1Jjv3+l/+oTVoy7/Yx25F9izOKuM3oKG82fFj76zPg3Sll+/k8eCrWZYP0 + VfVmXiOr4aQolQl//24F/Oe//vrrf/1uGJTVKy7miwF9PPb//u+rAv8OXsG/BUH8NxP/3ESgXZDE + f//zX5cQ/q7bqqz7/91Xefzt/v7nL+nPbYO/+6oPiv/n8b/mb/3nv/4PAAAA//8DAPzVCbHhIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0b986749b8f9cc-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 22:52:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '18' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 0d93269b5615ee055622aca99d9e4aa9 + status: + code: 200 + message: OK +- request: + body: '{"input": [[18031, 1495, 220, 17]], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '99' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaWxO6vJbm7/eneOu9ZXYJCCTsO05yNlHxgFNTU4KIICgCCSRd/d2n8L+rp/vG + KgElhJW1nue38h//+Ouvvz9ZXeTj3//66++mGsa//9dy7H4bb3//66///Y+//vrrr//4ff6PK4s2 + 
K+736l3+Lv+drN73Yv77X3+J/3Xk/1/0r7/+Nhu9on7/9KJZFRRREw+dQKPUjAE9rMyjbnrqTPHl + c7CJYWMEP8GzpfZ+7hJi248X9HfvB+Gis+tn15R28BuMChKNDbN57gERpCyKsfPy5Z5sQNvCJPnM + OCQjqom2IzH4qlwgq9C3MqafrCOUQiZR92jTbMgDoQIseDyRnutzP21j5QbTwAWEMfSN5vdOyAEN + DxsamYUSDeo0OzpQCpsGkdMmgwYphPIKvvGmoW7PTvNVgI96Eqj/KDc1l4ptDLL31aGGPab2jIpm + gBEBb0SaY9DzUNghePeeO+pDXiWjwOMKykhqcAD9iz1bp0qBSh1uaOhpVUQlYXfQ2Ls+oDqzKsDS + +XuGVe56FMmt03+79e4INbBvMF5Hc8KfRrKDosJKitw97meJJSmIGnBHsFdLMF8vowI+YVuhefBE + QMbV14BfTdTo3qiPdrcu7ilUhhBjS8BRPeaKeYQ+egc4Eo4cLO9ThoJCMmxcH0rNiNenYGhWLXZf + LzmaLDzkUIsPGeHKS+LTvYgVbSOeZuo8U9TzIrcQvL2SkW4qQnuyzStZX73HBsdvf6inPJArUMM4 + wZZzczl73aYDNL4axDZJ9vVwnbUKxpK7I00yXGz2Xpk7OFWnkgYvV4xouvq62r15mwSWyteeTpsy + 1kceOnQLKEwGks0t6FbsSK1pq4P5As4hjBRI0PRhhi0aPB9gxvMSI13ec37s7TMM3rTEaGeRfqI2 + FQGukgv6PMqmn0/4GMOSrGvsRKdVRM+2d4Zb5N6wTV5GLWv+S4HeqVSplfWnZDrc7RtUBh9jL/y0 + YDa7jwLB5jhQy+l0m+SVn0NkIw9vNkBJxtaXQgj6S0+d3frN6bdLJ/BQxBe2XjHMiJI+fPhevSx8 + 4KOSzLRTz9oqJBPRYTkBNq20GwT17Y4N8y71syx8cribrohugPmsJ/CCKTRc5YCdXAmy+T2blT5C + H9HLt48SAo4qhJDcLbwJs3vPM9ASaEhqjjQvGAHFWhPDTX1LyWrIupq4PTxCr3ja2LWMB5j13kar + qFHvNDZPIR89FgrwPbcpxunVrdePKX1BYwUrjOyn17OT5JXwKVkx/TOfsKUHcMAvY3meHe9Srz/A + IKcGEUUtTuayxBB8uveAgHQMsjn+flMYhENPLUBUTsWzrYBgZm80NeUt4qN5ZHAsnDtRLkRLJns9 + yTqyooiQrXmpWXK3X9AXOowvZXevyTwZoZavWpNuGvxM2KPYuqq1cTG1/ZIC+sorCPdx0VG3dHZ8 + ErZXpKWW52Krvpc1+/T2AZi+MCIVXtuom078DNQxIki661lGFKWKwa3fVzgQlYCvm3giusQlFynd + /lVPjxApamE1Mg7m9QH8O1+13kTUlyvaQ/Y1W9jGtwZ7WC2TOXCvN9hf1hT/8tU4qnULe1HwqPtw + Wnt27+ILXov4jU3rZWTSoY9C6JSJifE6L7PXZf9wgbFVPHr/CIndR+imgMvBNGg0Jh5Yg2KE4HXd + +uRSdnpNEnHtA7A5D6iLnDabbnexgp+uGdBb2HY2HzdlBctJUjGiGz+RQoJbEM1Qxc5XKZK5jqcK + 3tBhRGs5ftcsHYsXYPxF6Ob9qDkPWOTD7FlEdLMZqmgctt0Xkmtg00itH/2k5dyFSfD+UjNtLjaN + taMMnlthR7IlHpZ4HeAVHtJfPgFMfiUyLDhRsJ+iFR/k3ekLdent0DCLdoDpvn6AEZKe1Pv2fTI7 + 3bz7U6+ConXs+bLqNNhotx3erg0DsMMcvOBmnVnUUTUZzNpXE+HQ6C1qHVb2/LtJfWik2pu6mmdE + cl75BTjqoo0D8Nn3zMT5AdrXcEWkefpEsyVdJkAGdETqWVQAuar9B9AMF0SVL0r06cCWwMvO6hEP + 
d6M9XLezBiVdi0nnJoeIfLzwC+CnCujmIbxrdumeBhw8IFETNU/AHbUn8OpETyQIngGYw8cdGNYr + QNaZ9gTz2jq7YHlfaBg+sKfzzEQ4ct/BtxmVCd8JCoRrsprxNoJN9tkN+gccn+xD5LjiEbOy7gjk + lfBGz0np7PE7XzXYbk8Cti26+81XC0qDc8LyPOBTNH0+0HY9gpSdwGqe1KEBb7djgANftAHreNyC + axSc6c1K1Z4MyceFxanzMCLiNZnbra+ARzoVtBA3PJrfpxApQTn2OLhs3pyvWdYCvv88kH5HRt9b + QWFoh75WkVw6E6e6pn/AirIzta+HTT/L59wF7+wuYVdxOZ/bzo/BMn8Emk7GSe8lFjQTfYvR9tDW + JD52JWR5yZAcV4k9+8LO0H7nL2IeJqNmkUI93NoS4/aa1bw+Pr8wdOUttsuK8e6y6hTYsNMDiept + 3893PzZ+6w1Nx1WSjMQcBCjsvyVZxZ5sT8c9ImD6pnsc7m/MHp7WCUKolQU+jMThfN4yBE3V4kSb + kZGx9XDy4SocJopWLbbp6zNZulYcJoov6jOiiWcRuAcPTua397VZ9MrYL5/iramt+4+bBwpc6jPd + wnIH+if0crDOLZs6wbnvJzgHORyIccHRaf/i1D7VDtTs3Z5mD6uuyToud1BoiUumerdO6Cnbu3Af + FkciCn0Usaq45yBVIxM7WtTwcVKBAN7eIyKqL9p81D3r9e/85r+1fiRPxYL3W/vE0eXpZSwdzy3U + 2D4jK2f94eN7JxfwfatyjEPfSsaDF1baC8UfpIBZ4OQX77OfWxQdqwRMg9fHsKuJisSDnCVzil8t + +LbiDge71zmZNkf/pSsb54hPHJ8TNtXcAegU59h5XTc9r/xjCx7uFFDPvIwJIf3Lgpp92FNvd90A + dgw2L/i4TRwv820z2760cM+rhkhX0kStoFoCZGaVkPm+yZM/40Elgti9H30+i8XWgBO8tRS7cp4N + L4scoXPGGjqy0MnWQJgN8BhlB3uRP0Sd5moxFJoupJbdsYRzU6qgO1eY+t3c8WE3mzf4CeoWibnS + JbPbSAo8YTH93Q/061tpQb/pN0R/bQvOxXOkwfuhnmjU2xt72ByNFzzyScf2fg4yHn1vMUiMpkT6 + umyjacOlHCz5gl7mwcomeZGDz+9qQ43U+GZ0uOcuLIlUU+OCT4Ah/yVDIsUPpNanRz/9xvfTw4gf + i77Q/EED6I1mHI3Jm0+fdepD4doQ6pdGFPF6d9fgCDYH9HbSVzacgxODdoJcbCz1fp5ULvzRL54+ + 1pwk4ioE76aCZNBvh4Rf1p8JSnztkjWHL8AHyfvCm7G3cSyHm2jYp+sP0OOKU5s7DZ/V24TgsDYq + aoFH3c8gfE/aBh1ntFYgrbmxvaYa5GVH7at5rplvhEyrOQoIH8Q6+QZTmoO3X3nk5bkfQA5bXwB3 + 7fPG+NYF9rI+HGAPaIee4ZGBKZmvE1wV9EZxqkbgK6Rr5RdfBMCTkcz8HBF4wK1BzcbV+VilDwtI + NjfIR0+8erqfoxYu9YbauTX2LBHXIfSK2ibd61Lbk/OgLlj0JoEbaRfxNHsWcFOnKcaF8simcDhq + sJMo+tXDbPr2SQ6xnshEpB2rOX0qMVjWN9J7JPX8azqpWpe+QnQj1DJ2/5oTOClaR4Tm2NXTqfuI + kM5HE9+j/px0llqX8PvmHVovemKMivsRqGFsICDZWrbo+wPs8ICwbe6s/k89q0azxDjxYzArqPah + ebICJHdOlix6/agdVl8XzUNqg3noPsLPzyCQP1nE6Hp24ODDmaIjtzh9Gf0H5ptcwlFsRBlLxJWv + RYa0xoZe3aOpq2++WjzIdtGjj4hP5hhCq98gwj2aZbPC+gk24TnE9p19OZOO+xjSZLejbvDMow8f + 
nBf4hpOII5fue/Z734X9YeitQNyPCXcKGJzGCIGTcQLzlTcIRoN6oObe0Tj3G8GFB1Q7aLeMZ8au + FkIfNQH6rpCUcXv7nPTseOLY9a0taKY9fYGKuDkC8FRmY+w+J7hLbldq4EedTUo85TokDwsjd097 + eumzAST+Z4+UG52TKauMHWzMo0H35OMm4xbVDvgqcrOVjefcz2rRMBjwCdJrI++T6ahmLrDlQCSa + jk0wJV2aw0fNBBoes8CeuZC68HU7ONh1mFHzTUBS0OTHB/aNGHIaFhsf8KpRaDijMhvSzfTS6bTf + UP9tmpyvbx8DEAk9yHyXp55tam7Au1fvEEcTS6b9TjjD+GI9qNN2fsSuz8nRN+g8I7463uvpeLoV + QHaVC93o5a1na/9YQsaeDJsWl/s3Ci8VvHhWSSZ7VOx+/9UEeFppBY3Dk5S0Htc/aviUR2r2b5pN + SNGOoLt0dxyX78yenaCNgQX98JffkunHCzznoVNUJKyeza7UtMcw+9heNUPPr16fw2ZOXaIv8U8u + XWdAL64INcSK9UzfyS0c7/bzTz0m5uX+0mZ0A6i/7WlCHtZ2AtJpPiGBbvys3xWjBtVDhLG9md58 + mnMTatXKlMinMaSeFcx2QP/UZEJ0eQ84U6EP/MNbpOhQl9HUZ08Ed95Rx57thf0QllgGxzPzicJm + MRt2W4PBi+BkSFlXfs1wPLVQGD4rHK81Kxni9KzBxf8TeZRwvehXBnPxsaLh5jr19GzjM7zE+pWa + s7fKyDKfEPf7J5Kf9SMa4RnKv+uJKNxgNF9WTw0aF6lH7AoUm0SvbILqhA1sb41TxPo++8DVeYqp + 4zTrZEx4XMB9dB2p+xRJ1JK6IvClbxmOj+tPTe7fgMH+oZTUlFKSMHfyHXhbnwUCLDwks7ntWnBp + 3D01u0sMRty+LfA5vS3EdmCsyR2FueavSLT4ZbNfb4tTqSz+n/BlPJNuxiFw4sMZL7/nnLvmDm6q + S07DMv/W38rtFK12Njn1l/mbtS8ToXSW9xQNUsM5vk1H+BhFB7sBiyLexMoAbXdDKLKf735Ui3GC + J29+UzvcbaPueewKIPrkRn56Vlr0DpiKs0TjXjWANM3sA4a1DhADj7qecy61MLB6beEjx2jC6XsH + axBccLw9xxlXilOrlSfpir7nW15Pd82JoVc+r+Tb623PfCBDmNnpDf/qN4uMxNcXHoff+e2dTZf9 + xYGkjkO8q59twsykNAA9bjscdlBLPsDXHRCHuo23cR6ChfeIsI0uHZHXbmATBw8f2GT7msz7Uwc4 + GQvyq7803m70jJUED+DImY5aa48ANU4HAsWGYmztwNh3jaIVULwRirp0c6jZcZMaEM7Vkdr6KYwm + KS4d6MS7M3Zq1YikHRMduMU2R9LCP+bgezD0JV9j98SHmptqcoZj4d5pqKEvGO8v+wxRHu9x5Dxo + NBiX+w0+bTUk6vMcR/PlcZbhwg+QdB6JPUssS+Fl7bbYcZpLMtrmoMDZuO7JkHl9xHkj+OCcqwm1 + nrmYzHcjJNA/NCK2PzaNhnX+HWBlWAO1PvrU89zjMvSe7Yl6+LNP6A0PTIuhs6FBcAxsktyjFsYb + I/yTTxZe5MPBFXIkL36f5aZ+g92m93GoFGM91NV808V56qnn+mYvi74TgqXeoVlKUdLvLNJqi58l + 8iS3PXN7eAa17dtE8YqqnuSpJHByErrEv2RPz/ByAAtPoY6srsCv/ursVOlIyrZJwg8r8wzXiX6g + W3El9mNIvNcfPxmXqQOY4R0E+IT6jUYsYcn4Os4yTD7DhXo02tUT9fgLjJ2j08BNmE3zGoSgsN4y + uRwCO2KfQkDAzfPPH741yNOHQBhcc2qbu6pnH8/6QBREBnX7jVfzj68fwWPPOmx4wttmb00qQdpu + 
JqTuToY9ty+Yq7/nQeLZsmdvXMd/9FXMAxJ9tvGUwkW/YXup/7wZmtcfvW+HuzEaoaYX8J63Mo21 + b5gN22E8gISSGGlVVtgT2PoyPF05pBs/DyN5WH8GbeGhhL3iPOPkVPvQc+46DsNrFhHkv0Tt6UCX + Rmi3BRreteGPP5D++cx7OpmjD043UfnxkHrRWwyoWXDF/mxe7FkVJhGqBH2p2zZWxq731xmA6+VM + +rES7GHrD1+I+GFLury58ElVWAqtzlWxu3aDaDbHIv/5LTK/1l7Np82kQaXfJKgZHqgWHyzbgS8V + bzhuUpz9qa+Xjf7ClkrbhCpVdwQLf0K7lxnxacRDBaydQynKDqCmKHyU8NQqR3zZ+TIf7lX6BaA6 + PWngizX/6QVt8bf0x48Yf6wJpORAcbjkJ35XzBKe7xAT5nR3ezqZxxwyUnt0+xGSaPIHp/zxj5+f + W/SF0sIlnvCSz+uprQMGVRWH9Ocvh9/vFz9L/RvdJ5zMVaE9PFpR3xpRMqVNU/z0wR9/wGpUI31o + jC3eBhnn/WjmDCRN59KIvcxkXYC1CxZeiLGz/gDS4Qj+0Z8/f0p6L7N+/gE7tDvUa98N0B9+Dhbe + xkDOd1A89ALiJX9ytlO0En7JXOBtlJd2N+4vMTCJlZB5FYwRi+NPDMxcv+CtYkQ1P5xFCHVQ69vx + NL4S+tNTthyJ1P2Kaf/jyfCn3xY9m3D9rBtwupwMokisz+YqfRi/90t/8SR90jWDvjyYSDeyLvlT + r4r4A6n7kppIjv28gIdr/cLewj/ENXy02lKfsJvWYj9oOXCgncTuwuPzhFmgQMD7tg8iLXxv8dcu + LFvlhDPhyDkFlUrAQWoMujxvRBf++PMf1IIPlEzXraqp37u8oebGvmbz5x6VkGltTa2Fh/PTaj+o + 96Yxqa0dE5srQvmFQ73aYzRLu2ziU3oGD0V+IVYMcbYm87eAe3Dn2L4emp49rC0DxZnYePN+2Hye + 9rSFQ2Nt0Wu3fgO2jssDlGsQ/3hD9vuuW5J/onvu9/3LVQIEriJ+E1hdlGS6zloJM604YVc6R3zp + 5xxBEjRf6qfoASac0gP48U5r25cJ78T14cdDFz+2ylj/xppWfpQvDqbUqb/xcHfAwm+wC9trNDIW + Gb/x0+hUAnvhJ62+PlmI+nS6J8y13wJw3w9/WU+fbOFv3x+PoeEDbuzRDE4iXHg7kh7CuydAYS9I + k8NuWQ9zQs/1IdW51AVIGKQNWCdeSKAl+A8kXt60p99ma4Fh0DOMn+o5m41NedDDmLs0ODRVL10z + 9QPryNtj91TpPSu/t1Bb+AdS3uYTfBR/IKC57xX6p58npS6DH/s1Yke45dHkH1MBat9zTO3t/mVT + cVzl/+YxLHSSWXT3OVQ27hHjR0CyGQnKBD3ptSKc8DNguRv4v/pD74+itnkpuiGg2jkik4s6zrw8 + dOESr0Ruj0kyrqsuhPtTLv7pB5Kmml+6nQVHUqZGmLBCc25QTeKIFqXoJLMUYgu8ttHwh5+w0+n2 + hegYlNRRe6MWB1C46o9HO216jYYHun1hqgYmTRGhoGt9PYSIBivy61dM5uM8wG2/wRQVL7MWnZrJ + 8Ncf88D9aX8vXWeBxQ9iV7lUYHqwZKeFD/lC2uFsZrKQrjSwnXyZWqfhyakilB/dnUuMLVM9RcVT + y5m29IOwZYVO1O3rQIQXZJ0W3uJxVrDIhSImGNFHQBJ+7QcHaqdD+6ffwQ+r4PyLH2z4wszJUyjL + f/OM9vvtyVLf/qyXepl/vvR7VPw9+0t96expBrIIowEckHCsOKD4PLSwgKNFJlq9wbQ26pu+5Ed6 + iEJSTyLEMSxnRSL6yqyW/m54hMYA9ji0FAHMVXox4HMwTPrjYwyktIK2F36xueRHkp5qC/ye/7D7 + 
Cv34y29SwR+E9iHlJPBjFz6E+Y6Reh5qIh33CAgzacn+1So90beBBR8uC/CmFA7ZzJB1g0aqvPHx + 4Udg0UM7ffk/BLfnIWPyK5PVxU9RLAxjPYcsOf74GrYlcQRsp7Dql//+9GP+9GcOq49LvaxjYBbW + uwN8btQH3ZzzlT2N+FXBCGt3vOgBzk9gA2FTXkIceh23J3XXWnCqLiWS3PidMHP9+cIfD9BPo5OJ + q9OtBE14DMlkEqeX8aQiKNy+XyS8k3vU78O3BnfGxadGVjqAaP6ggIU/oq44SfaMzsMNPgfLpIaL + AsD9QGDg11/96ddfvw/+/dsV8J//+Ouv//PbYdB+7kWzbAwYi3n8539tFfjn7X77pyjK/6Tyn50I + ZLiVxd//+vcmhL+7/tN24/8dP6/iPfz9r7+UP7sN/h4/4635b4f/sdzrP//x/wAAAP//AwC2t0TP + 4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0b9867da38f9cc-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 22:52:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '20' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 25535161a3c6c4c4d2d95830fc85649a + status: + code: 200 + message: OK +- request: + body: '{"input": [[18031, 1495, 220, 16]], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '99' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x + ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0 + Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071 + gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq + 
P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl + ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0 + HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN + SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5 + yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M + G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf + +fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI + dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv + pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA + S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF + o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx + WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0 + jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt + fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp + 3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG + MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB + /v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0 + DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK + fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps + HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz + mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd + PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl + zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN + nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7 + 6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW + 
tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep + aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj + 7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB + jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu + KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM + /uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7 + zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO + fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB + vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k + VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD + GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U + fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU + p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai + cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8 + Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP + ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi + MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf + uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg + CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC + tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX + baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ + qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ + ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf + zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq + pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89 + 
nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB + 4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr + Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/ + l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt + NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f + bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8 + jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl + 74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK + 1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY + mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy + rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn + eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn + IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq + uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71 + j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst + 6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K + H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19 + /awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl + s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f + WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK + mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ + 7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk + tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju + TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO + urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M + 
dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C + b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G + 5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF + bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f + ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc + 614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e + mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86 + iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ + G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR + aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO + mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm + AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M + H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr + T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz + iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4 + 5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj + l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81 + NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k + Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf + D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii + 7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh + JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3 + 01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy + 4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H + BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T + 
bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n + 4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+ + 6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ + QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2 + 4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0bc85bcee4253c-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 23:25:16 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1067' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 482e97acc620bbf5e2f7d3dd44145666 + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml index bb29c4d9..970b806f 100644 --- a/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml +++ b/tests/integration/cassettes/test_memory_management/test_save_memory_trimmed_from_context_window.yaml @@ -502,4 +502,342 @@ interactions: status: code: 200 message: OK +- request: + body: '{"input": [[72803, 18321, 25, 314, 257, 330, 61665, 82, 794, 314, 260, + 330, 1342, 794, 330, 61665, 82, 498, 260, 330, 20489, 287, 794, 330, 20489, + 287, 498, 260, 330, 10609, 794, 330, 10609, 498, 260, 330, 38096, 42914, 794, + 330, 38096, 42914, 498, 260, 330, 82, 23635, 794, 330, 82, 23635, 1, 257, 2529, + 257, 330, 5749, 794, 314, 260, 330, 609, 794, 330, 
17943, 498, 260, 330, 2164, + 794, 314, 1835, 330, 1663, 794, 330, 17943, 5857, 1, 260, 335, 257, 335, 335, + 5832, 25, 2290, 11344, 37957, 25, 4153, 5832, 25, 44921, 8245, 13]], "model": + "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '594' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaWw+6SrPm799PsbJunTcCKl2sO04iB+1G8ICTyQQ8cFDOdAO9s7/7RP87e2Zu + TMQOkqLqqad+3f/xr7/++rtOiud9+Pufv/7+5P3w9//4XnvEQ/z3P3/9z3/99ddff/3H7/P/W/ks + k+fjkVfpb/nvx7x6PKe///lL+O8r/3fRP3/93fe7HdujKCtm9AaQH459JDpOlmEvSuldKTb3hq4y + 9EnGJcUlek0rkR107xBO5h6vIbBDk1n1aujmt2ovYMrrgXkvfcdXzeomQ4hDgdhOt+um6pZGCuyU + BTm4LOLjeNqO0M6XM9NKpUS1p2aqIhT5ihmvte7xp7SQ0USOV6KdmykZykC9QPFsK8rTi1583uWI + IYufKZUDUnBa3+gI0C5KeuJSyadFvr9D5/lnYsJcdZNT9xfwK2lJto7AQ74U/UDBMV0xw3hdjTkX + cAR4Ye5J9H1eHj9uJjzR/UoscbNJmsPSTCH11ndyDkaOaOmWNXzeF4PYRK66sehif8NOtwXBgsL5 + rJ2EFqARl8yL37uumc4pwFHiI15eu9ZgXqX6iolpSJUjNbxJuWlvRTeeB7aPk84bLuJOAC4JAt5U + 6z6cpEAS0DdeVOldxrvD6NmQdQcZi6el4A2rSDAh9D2JeOGq8qZvfJTQd6RvvPJiiN5zBLp+Lcnh + qhQevTAXQADxzXaX0SxWIjxOco6OAdur3tmbPEmrf8/LrMtG83i/E2Q4GZs1/sV/ctqdC6VVThiN + smNMt/KBoXZjDy/cdenNu7XsgmjJe+Ys7LwYVsjxoRs2IztICkrmzyeZAWvziliefEHNFao1nM58 + w1yao2I+yPkdAu2wZYew2ySj40tPGEVoKdxHqZjmh6bLZBfLxN2ZhTHKPr7IaOEX5F4c02QcPmsV + PsFTZ2SZ9Al3F+IITSv45JpgEw1NXaewyQqH1hHwYjx0kwmLsoqx6Cm1MQPOffRslgYxcLvibPqI + LrTbNiD2ywu7WdpPoNykmeNGDIZwPEebFDovOFN5Mj7FuG6iGmK7Foh3saqEj6/lGiXX6kN2NZ2M + 0SFhLU9IiTE3QrUTptjQlfmxSggeb3Yyyqd6RPmu8PFY7XE3V/PGBH1VAlX0Y9RN92tbyuvja2JO + TzPObueNikoLm8Q2kdIx9aM/FefxWpLt1a7RJHtvH6Rn5DMj1Hs+L24Eft9/9Z7M67WuL+kFMnYt + d4jT7ODP8LGTmJiNUxZzFCcWbF5BTDxFcrppWKq9Yjb5me18IvNJdNAC1SsTU4k/8+7P+3KV4kpr + MTt7rAkzGQK7m8gObY7dfJDbpzy+Hxfm0C4y+vjUl/BEzyvDghIiSsF5yp8u1r/5oxerTwxPWDoL + 
xrAtQjGdcYBhf8A6uSTbVVF+18N+Yi8sBdU1mZnRvNGAtDWLCvbyJvZZRCjZDu8/95/5uTpBPRgl + /ZS3E+rH02EG7F5cZhru2+CW20poI1t3RiJyRuO8TWx4ClFEtGR9DFft/T1DjitCuYSNbl6vXR1J + Y7v8xqfppmOS65DqiztzJ2NbSImX3mFfxxPRjtsP5+G5yBVto6+J+vSxwQVBDGCKDhHbNmrr0fm0 + z2VooSSX+rUu2MPXAiDtSEj01hqDa/acQ3o/OjS1n+uEt/d+hMCZI6amn1s3730Fwz5dbOlGf5w7 + blznWt4f9joWHIWhwa60HNTW3NEmKzJj+jBuIpGXhBmATIPdrqEEa0G0mCVybPBzYQvAy/uO3IIo + DUe42wISbCrSoxt9wnHT8icYxTpgztZ00Swk71KG2yvAqD3UHXMi1ZXp9nVgum0J3mfrZ3/ij5cu + a4r+ajo1OlXhgYoPsUnY9A4u4LOwI5YfrULaIA7gP+UF2/Gz5Q1y81IhPu5tRq7TOeHm7XGH3hFu + zNBylc/65fVEP73XfWgSnugFwHx4UmZMY5HMXbW35H0KW0LY3CWjlacUUPjM2Rbv7WTeJLKJwix4 + MK/PT8YIy1GAjSxKbJc1STeii/FWzOzMmIalbSf6E1D5aBiUuccV5bPxWObQ5IFCDmPtG1/9C+Ap + xBFdNHNodFtDnwFp74xh6tYG/yhOiyxnETM83uqQVgGmsFoedOLOst/Nj3VykZeBPBJ87o5IvPBB + R6iPc6ygJOl6KY5y8CthSQ7qKU9GeFlP6FaLiJideiwm7THZyldfiU2FU8f3ND1BH4wxs4Ik5fzq + SCnK7PLFzFa2kkmTHROCOJuJE/GrMYWFbqHzECpE3ypLo+0H5wQb+eUQYl2Fblb8rQXztJ+YGmnY + GyMvo8onlxjR3GibdDs/NSG5fj4stuZNMtzs/I5MvzwzvDGWXpMKqg4VfeRky4c24SdZusBp6nOi + utVgzHdcmgC7/Zvt9ZODaFyNMdhXEcg3n4xx3fgtiI0rE/zBu2LyJrYHL58p2W6p7fXS2vNRx/Qr + UZch47xbbBayVNEF+da718/LhwSLtUkJyUEyaNqkVNH2LCfaMM5FjYdwrayF5w2P1+LQjettWgMP + Hy+azThAHEtqDPuXZTBXP22M/mpqNcIiKqnA34rBzstoBFaGS7JvFimvN7jL0XvYd+TxHDGi210v + gHYZXfL1T2i8QJZDUb1rtjV2M+o9uaCgRm/ObO9eJcNGb2JooD+SfSd6iBXhKYL7iI+ENI1UsHu1 + GJF49TfERc+7x7OyuUMdXvbE8YlXjHftlcvr4jwz43w8GHMUjYGyRmhLdrp4Q/M8nu5QL3KZ7Zhx + D+f6ktnyJbAroo42C7v4yddoKQVb4qmnqeg11dKBXq+Y6PP2kgxFfh7RfFF1EgXnRTjYxi1FVyw0 + 7Lo33uGQvyIX9FCSqXQ7XfkcKmIJSepuqCQfaTi1XWmDU6UVM7erd9cv+25GX/9CZSQdDWY5qq58 + 3xcd3SzjbT9aoGD35BI//Wy6Yb8hPtq/TIMZ/iYyxnvxpspS8rdsV+4Szu9luFCqS+0wrzY5p0Ly + foMbsAudh8zlkruTL7/8p4vKe/BR39spRKapMvtS1QYXrnqOXMnqiFPMmldvE7VFX7/MbB6cUDdX + V4Beazie16WaCKUcx8hDlJJ9xdJuRFcaQe1GHgvzw6GQPp9kBG0/5ExbozAcvv0Dff0S3hzqSzjS + WqzhcM9SZnk0TViZ33N0LBMDX5iqh6uyWF1AScuWmSsIjH7dRC2KMyVnd7MRkwlvNeFPvR+6niWj + PIc5iPOjpgu0D9GYXlcUJThYsa26eHpzhnkJOxt57HCvYo+vHJki2FVHdmgQ+9NPFJXHIl5/+/vw + 
rV+lelAbizh68maXEBXlsvzByr2SvSmPRfrzr8SUtpU30v2HKpH5HPD6vROSRvQuI3y6kbGDGR86 + etmLI0xorzKSSy7n/vviwtsRL3jdK07BSBIGgLQyw7I1b8I5Nu0IzevuQba2EXbD1x9BFq8OFOJM + SjiCVoLzPS2wNJC8YI9B0GURsw/eLPCBT1u/OSFr8ZCYq/fU4OelP8P6uDNp/u0f07LW+p+fZuq0 + lMOZryAFpmcKXXa96q2wZMfwJJ7GdqsMiin29RQyZYtorphpMV0f8Qn8g6rQ03uaOn7aJDmKxtmg + i+OnLOpNfwmQ/BCeLDH7shtSP9JBX2HCHLV0C16IWQnRQVUZXrbCN962Do9A7JkaPvKkcYNjjZb2 + K2G7g0zC5sOQCW+puBEVL3NjNLqLCXw5C0RPGqeT9HVfb0S5xbhaZdBN/iT08OlmRtReN40pu46m + 0soeJc6sZIjKy9YEz77eiDlVFp/IPpNhsJBD+dVee715NiLYrl8WRbBtOF1q8RPVifMhh2dbdF3V + XWPY1IpIdrtQNqpqrwnoq4ds+yodj3/zGbJiwsR96KSjZY8EWDz8noXDc1P85ielbBYWXjs3BbH1 + kFMIuvZGpZN07cYl3qhw3z8xIc+nyEd3o8pKt4IIs1g9d1MmtgKs83dEDp6mF1MWnmvI09UCi6+y + McaXf61lej1j4kZvlozUc9frPnR2TF/3XTe961CHkvsC25/2ntfeQzuHPIMPs/S1Z8w//ehG+UY8 + x7XQ6oWOF+Xbr6moLhbep1L8SPnNk8FRF4pp92ru8Ej0gJjBZgy5q8SAYnWb0fG2vhljsL0AgBCe + MPQHzRvzQsbwfR90/s7n03lgM7JxgZl+jpuCa7acynLl7Qlu+grROWgoWp30E3OjVi7ox8AlBHYz + MatAez5Lbd+jeOelzJI9w5DkIrBhpRcq3vz83dePIxy2a+JsxkNBAecBfNS3RpIPq8P5eNT7nx9l + l9PYh2MSZwLUmrSmo3J/Iy6lhwjmJ+h4Ck9bLrCj6ioyPXjMUDSSzNfTQ4LTxsNENQQtWbXyK0XG + TmnIdrDEhBuCXsqZskPf+fsW8hcILoTcfTK3NZOE7oRXjxanS//lC1UxSGSzB1pLOV2v7TaZt42s + I2dcnSl8+cZQBvYJTinBdO3ddyE17vcenMdjyZxTlXat2vQ5rEnFCBl9D4nsI8Xw9ed4w43Bm1I2 + vFEmNoSp0kPu+Np75OjsGYRsccCLwWmJjeR1mxLvUFfGXAVWD7cEJqbZZ8Sp8VjlPx5AdMVMu8En + VQ5DxF1ySJfEGx9NtoCZDXdmbV9pyL/9Qw7so8m+vIHXfXZ+w0vWE4IZ2iHeaav2xz/Yb97ru5uS + w/aCo5/fCufnwi8hzpY5sU7jPpw01K3BGe810e+71qBvrZthIz8c5pRalky4niSFRDJmrolwOHB1 + MhUVLWzacjVBvNprkiLTak3Xh9XTmw7ZbYbandds501xyPd1Z0IfZgL76cXk0uyt/HiRAehtTC9P + KUG72iuiG7md8NnWY7CxnRGP3Eo0D9HtCb/82C1Siw9h4VowNX1Pp855hO/AEGx4tteRrnul6cbn + IVDRb341WIXD6UOjALbMjphTpKukf8/zBTwh0LFwtv1uVBX1Am7iN3iyzwn/w3cYXQXM5O+H0T+2 + m4V8LG8GXfw3v0HiS9r/8qMYr5kQo13L9/Rd5lPCs9r04ezWOds+RCec+LP7oy/s8dU7vm8PMjrr + O4LXq9ZBws8f2bLwZvbi6iaNsn658DleE6yM+ZvTIGpHuD9IjaUgSRE/1mMJ93NEyF6PRa8vyqUE + P/8UEbkqaIHoHR6idPrqv56IP70vX/lEDp8HSVb0HO/BOx5j0s7CJ+l5lZ6U+KZlBH9OARp/8cRi + 
uiFWbmp8vCzMCxqOlyfTb2sHjafj8Q442b2Zeo4jxN6n5R76d53Q6RpPHj/aGP/p16Q6ed5qebNP + SNnONR61ru+o1+UXqKa7R/zb+uZNN7t9wi15m0wvZbHoMWls5A5uy8zH4oMmqnZv1D3cC13tjXcy + dFMXozKLOCHzLS3G/HNWIVDQjs4+OAmd9u/nL/8o/eCqG9nRtuGoNxoz0kveze2nCeDL4+jYJJhP + 1VpcQBA7W/LTa5Fgs1dO52lDxe0l9AYQjzmKJnv89u97N6tNn6KY9Jx52bE2Rin285/eki0jrdFI + PNv/4T0K1WI0hvuljpQNuuJNkV4T2g/aBWX9JaOrx+Nj9LfDPofLW61ZKGwxF31SpTB2xZnYl8o2 + hA50+ofv/PxyPcWejuoPPrBtggVvPMenk/I6nCKi48ExuBw/apSepAfbBmOI2C1atGh/yUe29RTb + GJPNMkJfPobH4+lQfOtfhp2wk7HEn3rH+1Y3lVdd7OioTSs+1fdSB+4WT6osuV/Qe6imEEMk0fi4 + oqjf64YE0oW0mPa7Rfd85WoNNjocmVnW0PXf+toIhesy6307JFxzzxf4zrPErg+vbirF8YQ+Tq4R + e3Ftk8lpiQtCuyyYJ0ZOMi23kCITxRe6TAfJGKmTWsqwCyV664XBG52zv4Zf/x+F9B3OGUZv5Jhe + QDdwzfmEhukOU+bdiNfngpd+6xmSmu6JcWNDMgSPvYB6rePkxzOFN32tER9Si91e8d4Y3eiZojKb + Uqbbz8YYD7Zm/uZ7ugoi0s3EGGL05Wn4NJAjEjflLd7ooSCTX/8clJtTglCgN3PK3b5bm2cvgova + hORANZm/h+2h/vEZPAu4S/qiXElgBNGCdgoIqD+fribc6Ykw0/BZwj/3awAfcftgmvAIjUYVaYte + tR3T25fXjLfQGUHNUEZUQ+47nuqbO+Q39U2s5JqFjSZrJpBMIlSxCj0cH7v7jMS0qpg3dzWa2ta7 + oMPe5sQpd33B37djrZz2L4+i/l7zH79DbMVDhrtmYczsNaWKl0fa1x+RcHZ382VTrdcrdhy3C++b + 7ydwzm1Jvjy567OHsIDJn3u68W6SNwfCTGG9my/ferKNL29UgWQCweuAcGNKuJyDlj4+uKkW12L8 + 8bhQ/rhErSwjGbU8xCDYvcjcz8L3xMZKTn/y/zC/EoP2tHfl8z0vKGyqjE92XpnoN49k5yOE7BD4 + sULTBoidbO2QM/mjbwRYvXGnxsAnofTkP3pIyKHh3bENI9gyNyKO76h/+ChoB8mhtHrsCoG3AyAV + gc2M3aSi+bb8rGF+LnRClHcWjtbT8mGuZIPW2fpkjOJ5YaGfvzXwM0u4V9k+uqZ9R6ybrYWTAlOp + mA3SKT97Gl9dnUUOfUJjCugdGVN/Du/oFw8DP7VkVU/3Gj7g3L/z3ux99UeVR0VtiHnGiM8PacxB + 5G9CvM37xefV2CygbeITXhTbuVsNh9lVjO6yII4Xtrwb77KPct98E4+8Zo/J0vAGKxESCp80Lab9 + bUORFYY2zb79s79u+7t8OPU7OimhnYiy6zxh5ZkhXURRkPB0cQFINo8Sw5fHT3IFe2CrKTzM0lX1 + phXSfEjvocPsnTmHvTwn6Z/9F6vMc2M+Hl0K3Ku3X166RatgjFoY1eFE7G//548BdAhJlJCruqBo + 7v1NjM7h0seLfvcsxh8P+/JLDEs4ePPydcB/eJx6LYaCxqYaIzXLBUY2lmbwVJ+eCDuH4scXDXES + pBTWt/ZK7NdxgZj6cZ/QS/mduEu4J3VQOhe4e9eKad/84k++CeTwxW1CwvDs8Qd7SsBPpUXcTXX0 + poV5pChzrhuy/5Qemnva25DPw5J923DXKOurjeaDqbHj3S87Fj9uFthzeGSuExbhSIfIgrxJjnT8 + 
+s/i8u5qubVuPd34xOuYs3pQ2b6aOXshY5UM2mNjw9ff09tXj2Zpv1kAGy8V2S8lDUnO2w1ka6B3 + hitPQd/9ihZerLJwlqyPiXAKBB/oMzng5fd+87k59LJ6tHfsmL+sgovynENFXzndiG0ejgaxXLD6 + SiSWvrh8/Z0bwxjgmrntZeuNXRT30D0Kg3krpzHGVN6v0eI03IjV1kPy4/MQC+AS9dznXd/rqqq0 + VtKz/VLK+GiGdQlKurwycq9aPttoNcOP70SrtuE/HoG++kGsFeR8Plzmu/KnPjtD50JQahelUVXC + /O5dFCxJLxQIgQNzLu8L56ouBsp9LBdENdW1MWb32EZ32sN3fyYt5uapYeX4sM4/foT6XPvo8FiM + JyxHbVzwaFYXYLnXHYao7cJfP5GXQWp8eVvXUbYkJvzZnzk9w2R0o0uOlE3aMvvLP8ZMdbE8Iawy + LHwWfLyF2gi6m++Jdi5KYy6JY4LQWy5dCnXK650VjX/mFe9I56T1vcPzz35cnt/FcHRIUv+J9zY5 + vZKRSM/xx0cof+7yZGp3kotUX0uZWx/GcFxv6xpoLeRUwob3X/zxGT3Y16/X4fjdDwZRrjE5FLbB + uSwN5R8+40caNaj5uZYQjaPBVPu5Dod88emBreISN9M+//JXw0eW1D6IdwkUNHF1stAkuBrD1aPq + ZrYkFkjP2MeT0b359DTPOWotdUmSbmGH7G5ne8V4B0uil4IbzptNL0GTNy0zv/2YO2i04c62DbO0 + XZfwNO0xshE5MqtXIs5KyyzhO998+UfTTTNc3kpcWA7B5hWS4XKefXT08ImmG5Oh+hcvnGzfuKFd + 5LGh62UoRnTB7JyLyVx2NxWaDivM3izFZG4QWsjOCp2oEn6owQPsPeVv/TMyoDQc1UVh/3gP+eq1 + 0QtMjZUwa1y2P7zaonfOvgwxoZztN5WGRHNvrWGt0iv58bhJe2zc334cXWzONe/FYLxDe81D7DeZ + bExhuaNggBCQw72SjUl4Z5ZyH/dH9uOhHZbUCHYQpMwr8CIpx+pYKn//TgX857/++ut//U4YlPXj + +fkeDBie0/Dv/z4q8O/4Ef9bEKR/M+nPSQTax+nz73/+6xDC301Xl83wv4f6/az6v//5S5H/HDf4 + e6iH+PP/Xv/X99/+81//BwAA//8DALUBqRrjIAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0b9868dedafa62-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 22:52:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '196' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 0dc16b9c27373738c2c364d171678493 + status: + code: 200 + message: OK +- request: + body: 
'{"input": [[53380, 8245]], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '90' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/embeddings + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciCjVZdwjI2SoEj51OR0QREDlWAbWzv3sH/zu7u2+e + RCB5lJo15viNWf/xr7/++ruK8+ej//ufv/7+ZF3/9/+YryX3/v73P3/9z3/99ddff/3H7+//9+Sz + jJ9Jkn3T3+O/m9k3eY5///OX8N9X/u9D//z1N7ldGbNR1kfT59Co6Mw2K/aiVIi5YisGtOPUMVc/ + L9re624DJJoVM7PigS5k25cPVXN7MDsSyvbbeMOAFt0tZSRIHC4+lqELC7w4EyLkQtT7lXUGOVv4 + xP6aWj52jb5AVWAsmZY3H0Sr5pvJ3tn4sEc2Bfm0J7cMHo67IOq27vK+rnMT1MzcMUfRtvqg6h8N + neTdjm3dzTmezPutAtE+a8zpTl0+tXmwQGccXom7e9hxzz/GhBJtFxP7GR/ygdlYle8eUunickmi + US+7DkY/14n9El2PVpWkwUtRLlR+oi8atsc7hr47fYjrGEU+7dKwBOXerPD4dMN4bJMaQ4iMB9mb + m1gf40wI0CpbypSHFxSxaJpMFGYhJkTCu0jAbizA/dgTsn1POGdT4z7lNQ1Vtg9jFQ1ws314R+iM + uTUE8/pkmpwddJPpWvlup6phKZhH+0FcY0v4dE2eMvpe7IxoFwYxvSZnGazE3ODNEK50Zp8WGjpM + tkp2+15DvEpQAXW9jZkG6UYf3ru6QivPD/ellBr5NGy3AuyJR+jXMuS2t95+Awd4Jsw5Xl95Hwbd + E+KbOhDte3t443FRl0jp+RsPxKxaOhYhheubMyqHkhEJweK2kXUVUmYa/a4dJyTL0ETai5DqHSD+ + 3ZtX2DzXIj6v73c+JVluAnMblxk1nWJaDFYJJ1LdmOYPe1R56duACR0CYl2FQqeNaptw2eYRMQPm + eP3tET6gv9CG+dpRi1dppT7A/cCFHCIvb/ssUUxwk8/AtmdZ4s1d+GwA1+Gb7EdRjrg8ltqmzuQD + wevzHo2raAuKAlHI3FAq4u69rBr0FT4dHnl94PxjdqUUjvsvs7SQ5DxwVxu0DvYbgje3jLPLUung + IUU+cfinQM3xbT/lOKlbtr8GKh+Cbz7B5p0hsideFY+Lz2GA18dzsNBFEh9Ph/1Rvt+vI9ubG+SN + py96wuNSMaa3LEV0HW4GWJ7vBXFegeDxvD4WcAl0g+gtU9E0NFEF922ZM+sqGLpw3SkplKtsR0hr + brzWcqJwabx9YNawafVRJ4tSlkf0wTA9gmgKe/cI3mPPiLVqRL1TFqSAOy0NujwsIZqu1DeQpNgx + c8rUb4fruaGwWBFGl1k3oMnU8xT1eBMTs8F2PqRPHKCPVn6Y+0jktutWzhXOn+LK7Msj493KIRNc + hQdnvtMeeZ/trk+IPm+baFKwzId8lYToGB835IyzT9SwvU7RVf4qxDT6Tz6tjyyA62qpsX3XeS23 + nChQxhe2cO+Xb70+HXZH8PNWwZNK3Zhd3UxFn1oumJExveW7zgvRtlXfzPhKYTRsW/sMZ+E90pW1 + 
0bjItxlF5095pfLJtHU+7y/42IwRw1/e42F5OjcgnO4ZbSrrHo/nnf6EQBGfeFiIIR9MbROgJk1q + slOrJh8DfnyApDtXKms+4z0JvALlW+dIPC++tUMtpCkI09Wg/C2/9eElr46gYW+PJ2uRtVO0LFXI + /exKM3v34GyuX1gsjQLTy0WJu82zCmCR7XSCfdXzhn5bYPCmbUpX59eej6iRRDAcGfDiIUhtt0mf + A0Juz9m2GpOIBmWVwZCWDtlnuzQfF/ryjnAgIaY9NIcPr+pWwS29Ueb6XEYsV90BZVp+Yq7mE0S1 + 10gRvS8LZjfJSp8KczVA0Dp3grvnLZ5ulWfDnRYG0ZWTnHfiDQAVOpTMO6EmZ4m6TNGvHsz1XUYs + pdCgyNVXmN3Duh2wRAtA7aaiIB0/Oj0HYMPwuK+IJpgD4mW9esDyYZjsmLG8HdtLGkKgr2MMpn5t + xzJ8GMgf6hfzkjDxBmEfYpg/01zBaV7fuF0isxovTE9I4E3L1klhlaUhUW1pp493Kroo4EqNX30U + o+5rtnd4UKDEw+2kj62JTEi8RUB52fke1z/1A71Xpkds32/y7iahAlVFGLJ9/Yh03l6jFH56uniS + UJ/S1beCp4QxFazs6fGuWWOE5PsNL16NO/+eKgD11j/xNymZ3n3y8AHlVz0yw8qe+mSVcIU4aVvi + qcY3pqbeZnK08X22FyWid3EvdZB98I3tGrTJ668f2UjvF1ti9QHmfBHVRyBJoeH+cPX1FV4+j+iz + uZ7I+fY+elz/vgB9CqIxXVvsou7LFAzj6RRieLnHaBTYqP72G/Of1qsdy9r1kYmeMZ7ls6XW8drB + oR5vVHqv4pYdO8mWvH14ZqYXdmioE/kK1/fI6DrnUzusrFsg2zFdz/p11YfTQxvkJn3VFJ1Q0/LI + SQPIHleXaIj6fJA/moEGJ3DwtOdZPqDX1lXCJbsQkrNPTvfkkKIa0RcxxaDTh8RzOoDUd9lW6Ew0 + ruqNAZ0aJMSmuzHm46BfYdtlAzF0/uX9dHyVUrnFBc7rA8/H5CzfwXkqI/Eno8trIiVXpFaEEGsn + t5wNn4uNziE6EsLTZVzb7bBBK0k8UGm01jnjhZNBv9RqvGg76jVHqTeRVxRnZl6OQzRY33UDy1Z/ + 4Y15cVv6tc4VqjhpmWc/cT6cl3EFxeqr4+ztb9rxfbgfkbdmGtk367alk5U3EIXXB934vpsPheqU + yHiahBjudM65p8gYVT6EFJDURZ3TNQCU2Wu2N8ftXE/CHbbSUsZrU0lbdqpoJ5dVllPhK4Vxj89+ + hrZvp6PA1jbns/+BJlW3xAlqJxqWt7cJOC+WeOEqojct1t4d1Bt7/tH7qVdTgM2VdlSe/dv49WMb + 9B62dBLhzbueqU9Y7y2PkfX23Q58cH2lDByF6D0U+a/e0T3fq4wU0rGd0sTTYMdEmTn9K47YLj6b + 8mN/PJD7YyeiuloVsLkl0ZauBTvPh1d1qFB2phVT0UuN+HePr+hCM4E5/GPwLhGiFOonVYjxsr7R + oLdvX7m+jxad3v4mn/tHAGa3aDCkeyeiZbGnyLmKXzzaEfM+psI7cEwnZ9aGJPm0XVwCOPjjmu0l + M+TjSjsOKH6LSwxJ9eZjZd59cBfTB1eWlHjTJU7vSno6XJiviwKf0lxfoHOzj+b9zHReX+QKXbbv + CI+hjdEgHx8bqEVVISTJNI9P6qaBfXgAph/2fitW1agpg15+mUPliI+R0RRyAsaT2fmzjH96qFhv + JtBi7dF41LpPhdh74RKzrd9e9+snd7btiJZ9R31Q77ErF6410PuVFjE/ZZ8JnWRrR1SqFdEwaHsT + tEezoAthLFu+aX0fihRsYteRGfVDkcsg5HKEkS1O0RTTboH4JzwSp7++o2nYOiJszG2Fryx1+MwD + 
qryVFJnsrsc9GhVNlSH+pm8Ky3fV8gPFE5rvM9WvUNRl24sP6pT3mFlqn3f4vQXw72uKFztbjBls + iAiyaG6ouOwrbzpmIvzhGesyMn3qzHqD1q+BMM31ypiPplSg5pAGLBRhy0U7MGf9ubt4LfcuYuHh + 0iHZ3wlMpc4zoq8gDSAdhoY46/1bH/PmIsA9JypdJvXD49HDA7RNO4Ml9qLg43HxLiEZhy27D1M7 + r8cdg9FvK6I7ytHjr10mgOxVPjvoktUO8/5Avief6JqtRH26GlMA8/rhNM94PK4t20TblBrMmrQb + mkZkC+j9JJyWSFvrU4JIB7MeES0jfTyM4RT+4QkhfrUehRt6gG29C+JH9oD6IqvucBztGp/UoORs + We5UkGvrzFSFL3R68BjIR1H08MB97nWF6abyn/tXb+v14qevULuWRDqeZQl1zDZVGFnL2N5CjHcy + 2vigLaIY97pTIOavxxDiznkQLwkVr4eUPtDiMnyIPrYyZ61Fz2iuJ7wUvKBdCUxSYeYb4kSlEFXF + lWK0l+yImZ/8ko/CzZWlLXljTNd0bMf29aKotY4q274n2vawfpSA3oXHduYKe9PJuN3hoqgfsosP + G0TjiyhCPa4bog+OHXNlCO7KOJ0FukFu3fJFtwcYz+s3HqtGj1Z3KtoAKXbxcB+qeHgv0wp++rmQ + F67O0/NVhZkPiHU/i+10U5mIlkqXE3xwJU4TZHUw+x+m7z9DNLyvuyNcveUCS+v92xu87FLCay3u + mf1WJX2qfWeBYtyF5CUPa94eywH/eItyrXznvEfFXdHFISBmLxb8j16t72bPvMTTvb4BPEFcqiEj + lbPXB93Yb2C9DV7E21dKTNXYvMJrv3KIZ3V5PChYEJGTezs8xHanD4/vZoK5n7AQ0Y53wbedUPHd + hcSr4IGmMepT6TB9KmamNxsNK7HT0MwvONtuJH36UmmBzhfjyw6xgtt0M+oLcKWdPvNTihhlWQaf + w3uBefqq2mFd1BiwHy7xr3/R8JG4SPUZ0HhfJdEYxUGn5N5rZFqSaFFHj2QDi7QUCDa91pv5vkCx + my3p9A01T5z7pVRZwYAX21vqMVs5nEEZHiVxQcB8rK+KLOtrssECWbUx3ZHPFcrjKZj7o5rz/LyR + YfX19sStVDUW0iygytspNsw4NF48rWwjBbeMl1h4ZKq+6hoPUBU/dBYFKxuJx0xcQHnXJuIWV8Ob + uuSq/eGR4RrG3rRb2xTabfUkqu4t9dqd7p1sF28Ty/SocPZGlzvk2dn58VPbVOjYId0LFGbNfqqL + +5EqdkVvVJQ2VTw92V4ABy0jLLyPnjfRfBSVU7m4MYtGeTsZ3fEB5LR/sO1Hs7xBSwFAq6uUPuZ+ + xW3ldoSzkI905X0lnSc0miDvqhLLkbqIG6HgBXwfokGs+ObF44UmIdD87M1+q+SjVOIMITf0qGiS + qz6GkhiC8bkS4pNUaKnfrw2Y9ZfM68+76HJ5wHUnCmy3aTNv+JrtFe2dV0/FXiwQcx8XAYXIfBCr + eupoBKwN6JneVeIYdInYKlphmPsTc78n5lGWbDY/nmZG0mHU5Y+hgdm/MVP5PvP+Xq18+WC7wIyq + 0FvuHdMrcD7tmFWZ6o8PMjk3RZE5XKnayTc+AWgb+8Cs+NZGzVuNFz++xdJoXdpJ1d4DDHrxJVsv + uHtzPWRw7/OECvg+ofGzMky4VOuQWR5r8zrIvgGUvfdk7uxfum1rH2HWbzyd9w4SnuY6AB2kO2nZ + vfFYQqMBdpva+cOn/CtnFeTZ0cErmMAbGssPkb32XdwImo6GsxRSCBThySKzR3ldq7dA5hIizFPq + MZ5A359hcZk+7Od/uvNheYePVnyocH+I8fy+RaRt3MPMRx+PTY32BMsIG+I3Wq1zVtcF/NZHlvyp + 
ZU8vn2BnZwhPy7PjibW/BVQ+/R3ZSuKprV/UuUJ9NzWirraR1/OPP6BfHrXzvpLHn5KqobBEzpy3 + xREdkS2i84YZdPxolj7u9VRUbk2YU7ToSm9wlav9y6+Yx+6NzkC8HaHSrYR23+Sds09ypzAs9JA2 + J7PSR8tONXhmpzWun+YuHtTRCeDZ4Y4RxLt8+L3v1fO8+NWP3rBkIwNnxci0tR7zYWdLGFL/ZTPC + Rkfng2gclc+JHX5+PG+QmGVKW3B31vMgHl/JA8OSX01GQInbMamfBvQC9an02R29YTHtO/SrD4YD + 0xuwVJZAklLDq8Vmlw8ftDTATVwbr+tqatvK2j/hdnieyH4NOeLSavNQ5u9P22ql8dk/iIrImi2Z + 81jeXy7MhmMt1n/8MLfu9hMu99ObSqHx5vyzWp7Rn3obxE00ys7rgcwOGmZMqRJToeAlzPxOl0Zs + xqu9Xglw4+aEJzk6RjRPWhnoUbgztZxaTuNd7KJfPnj5xpjzilaDfJXdEx6er2fLr0G2gPxVG8x4 + 1HedG68qQ3O+wfZe7utsznfQzL/M3Qq+PnT70gRnJTjMCY0tGossvUK/VGumjo3aTrOeIxdXlz96 + 9nu/cmryEiN2d/X1S1AKFAuZzozVsEIcRfYTwtfXIW7mqfHgpbUJb7Q/EUtR8pifvXUDki3IVNlS + P5aukpPCz496hXtAvCmSK/z0wi0Giw9OlwEcNgeXWUlJ9EkdIQXxEsWM7D4rb/b7GHnKtSCE3I9o + 2rziTPrlu1a4nPO7c9P9yRdMk2w8pmhgAz5/E2aYapvToEwzFBurhM37PRrO1SeVnsampxs1a9Dg + KoEL+en8xYvPq8mHOpmuyp4/RzbnLy3DxtEA7AdLpr+EdcS7ZukjZJQ1Md/COxqtcRTQuiqedPFe + 02hkt3UlN3jlUUrBbMtHVahgN7giZv/cesLsH1G7fmKmwqHNR9tICtSk2nb+fnL8vXxvBhikfLKZ + j1H3W785r6Sw6ireA3YH6ee3z8KKt1xcY/PHl2wH00MfJP3bIWNJO2Y+o6XXB8aWQi4qBtlTq/DG + OLRMAH4U8UCdRdxvnccEiQcBe34sk08yIRg+rDTpMn5co6EfREAG36h0uHpvj4XVToT5M9tZm+yX + dzUgJa8VnlhXx9ORdzbMfoGYsPP1OX8Mf3nIH32lP17xtlKIr29V8liirlPwQc3J67B8xMNhuqpw + uj4rPG5iRR82a0NGO219w4PSFfog3oQFyk1BJPsvITHHJyeA2Z/RrlgK8bRITwHsk1bFt9CL+QrS + 8oEKp7Lx5bX95tw7VncpfH0ctrWc1BPn+lB+ftDbvLR8/eONHx9aO9lDnX4X1D952dZfpPr4zkUN + YJ/vmKHHhjfUQpWhJcMTMXgeeoMA3QIam27xuhC/7Zw/qvBw7AUzlp9VO/7y9tl/4XSJxfaXl8BH + 3JjsNb/vds+pgHJkX8iuoCuPXpPnBhG09Sma+0198z4hfC9uRox4jdH67fsNgKRdiSO6fjwx8ZT9 + /B1u2d31KH4rGH58uqVrE/HooS8A27uObDdHXec381JCsUQpXTyEW56el3ED54v5ZWQZV3wYfemI + JlX4ULR/5zFr2baBaeWaDHdPKR77/XGD6p3MmNdOez7MeTpKcrbB1zoqo498NxpIxYdPwuW59qbf + PISJnx1RM1VFYvn0B/C2KGT6ejTi8WHu8M8fUtgcgmhM6rMB67VBmOebDRqQHWvQd5cPXlpq3xY7 + LNuArcbA35Nv8U7ajhjCkXz3Y9XkcbNIkwAitvqQbfxxOd3Y/V2+YR5gYdaDPD1f5/V878g+uZN8 + Yi92R7Mek33Xta3wke0CurcsU6nqh5gvU9+UdHTz8E1gEZ8MZVsBeZkHvMRBqU/T8zAphb4oiZdf + 
zXy6uNxHRSgtmeVPaVvN8zY49bt8zm/vfDq69+PPz/+ZH7GZn8EZK5GOikm9eptqG8iMbUGXPH3F + /aotS1iQ24LC4sn59FpyDVxjzBieWuBjJ0su2Jpcs5+/44cuCJSZj8g8r2rpiFRRnvMndpFPX53R + 5+oM83yGuYyd417r+gZNqvghhlB+9cnv1AIZq/Wa4PboRvzS3zVY7ssTs1bNWW8gmjKQR+lDLNti + +Z/5BGflyHb8tPyT78MCw5kO3ZLk5W0xBsrn1B+IaomDN5DHOoWX7XyxpIPQsrm/ovG8etPJOJd5 + 1e2pCdMAnC610yMf7vn+AY9H1ODpkG3mPHIwlHVcwG//6axf5U9l5kc8Zd/Df+UJzlX4Mlcm32h8 + UeeOnoS4mC+GyBO3LSpg3LwWtFfGnI9NnrkQ8HRL/PvViadyV9lgriObbKPcigRfl1UQ7aNGdqja + 6/1vXvCM7s953lujztSGUDGu154d9knC26/w7v7kkzMPRTMPCiAZ9Zfs/fLtDfHFK9C1rQqizvwy + 0qvjgqYd3wTjTxiPhz7O0C2NKW1+vCMTywc/rxX2mz90Uqtryno955XDouEUDXUJ5JVLlItnDw3u + CUQUOcWe4Xl/T2GvnZHqh3/y1qjs1QqUMvAUKvcu8djv+87+gWCHW9GUrlgjK8H5Suf5V8yPu5MI + VmJssKQ4yKPe+3FEqtirDL9CEwnrPTLR1rkbVFk9xXyQdEZRHolXzKWFrQvCPvShacX+N++Ixpf/ + 1UCBQ8hILi7z7rlTB3mJtwqz1jvajl10EgAHz4j8eORPnrclOaZLt6p5mSf5RgaiH3Gd0G0+zjyA + +mZ1xlKEh3zgjROCWfEL2w9HC63G0axgKFlEIcidfHTIVoS/f6cC/vNff/31v34nDMoqeX7mgwH9 + c+z//d9HBf59T+7/FgTx30z8cxKBdvf0+fc//3UI4e+6rcq6/999VTy/3d///CX+OW3wd1/198// + c/lf8//6z3/9HwAAAP//AwBim/ij4SAAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0b986ab8dbfa62-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 01 May 2023 22:52:32 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '195' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3000' + x-ratelimit-remaining-requests: + - '2999' + x-ratelimit-reset-requests: + - 20ms + x-request-id: + - 208e3672e7991aa80472beb0310e43d9 + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml 
b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml index a38ce01f..8b0ee7dc 100644 --- a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml @@ -13,15 +13,15 @@ interactions: help you remember.\n3. No user assistance\n4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. 
Internet access for searches and information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered @@ -38,9 +38,9 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2766}' + "temperature": 0, "max_tokens": 2763}' headers: Accept: - '*/*' @@ -49,7 +49,7 @@ interactions: Connection: - keep-alive Content-Length: - - '3294' + - '3303' Content-Type: - application/json method: POST @@ -57,21 +57,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA4RSS2+bQBC+91eM5gwWxHZtcUsPlVy1aQ9tpapE1noZYGvYRTuDYsfiv1cYcCo7 - Sq/fPL7HzAlNhgnqUomumypc3T/dRe2X59W3OPrVbhZff0YPhyZ/WHz+8PETBuh2f0jLODHTrm4q - EuMsBqg9KaEMk/j9+m69iFbROsDaZVRhgkUj4Xy2DKX1OxdG8yjGAFtWBWFywsa7upGtuD1ZxmQ1 - jwN82X3B4+UiQHGiqgu0Xi+7AHXpjCbG5PcJa+JprXcVYYKK2bAoK71IZ4Vsb+CUWgCAFKV0bVEK - p5jACI4FOkgPprgBLl1bZcCivMDuCJ5UZmwBUhIYy+Jb3UvlbTyTg0BuKoKWp46+e3vGtKtrZbNZ - isG/VJ4UO2tsMfB9Lwly41mAhRowDOKgtRn53kd2wwoTKIr3DFIqAUuU9WM7goZ87nxNN7RNpezA - GMIPpte19kt68C2vaWpDuLeqOj7T6+oyEvK1sUPZ0mHwxleKtDditOF6Cn6yQZZbT4O1zZtZjI9D - 1RF2lDtP0Hinic73ejJSgrLHIanrPLghtZ+Yn0xV/f/gZ/uzFIc1XTB91RjezVNZVdPAcAn6SoTy - xfUvDoW+92X89hKTiLOQUU9qO+wCzI01XG6HN8MEWVyDARqb0QGTqHvs3v0FAAD//wMAM8Ih+goE + H4sIAAAAAAAAA4RSS2/TQBC+8ytGc7ajvNqmvsEBVA5wKKgSuIo264m9iXfX7IxJQuT/jhzbKUqq + cv3m8T1mjmgyTFAXSrStyvjuw9Ov8fxpM/7yuLefzObjD//5dkO733rz9REj9KsNaeknRtrbqiQx + 3mGEOpASyjCZ3C6m94vp5H4aofUZlZhgXkk8G93EUoeVj8ez8QQjrFnlhMkRq+BtJUvxW3KMyd1s + HuHL7jM+uZlHKF5UeYYWi0UToS680cSY/DyiJR7WBl8SJqiYDYty0or0Tsi1Bo6pAwBIUQpf54Vw + 
ign0YF+gvbRgig/Aha/LDFhUEFgdIJDKjMtBCgLjWEKtW6m8nIxkL7A2JUHNQ0fbvTxh2lurXDZK + MfqXKpBi74zLO75vBcHaBBZgoQoMg3ioXUah9ZFdscIAiuItgxRKwBFl7diKoKKw9sHSFW1VKtcx + xvCd6XWt7ZIWfMtrmroY3jtVHv7Q6+oyEgrWuK7saN954wtFOhgx2rAdgh9skOM6UGft4c0s+seh + 8gArWvtAUAWviU732hkpQLlDl9RlHlyR2g7MO1OW/z/4yf4oxW5NEw1f1Yd39VROWeoYzkFfiFAh + v/zFrtD2voxfX2IQcRLS60ldg02Ea+MMF8vuzTBBFl9hhMZltMdk3Dw37/4CAAD//wMA9lTsMQoE AAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe29d91e6b96e4-SJC + - 7c0ba80d58dc16a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -81,7 +81,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:17 GMT + - Mon, 01 May 2023 23:03:18 GMT Server: - cloudflare access-control-allow-origin: @@ -93,7 +93,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '9335' + - '6136' openai-version: - '2020-10-01' strict-transport-security: @@ -105,13 +105,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86493' + - '86495' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.337s + - 2.336s x-request-id: - - d98bca49710a9416e1e51117937d09fb + - 0be816ead27a5540ee282bab5022d63f status: code: 200 message: OK @@ -119,7 +119,7 @@ interactions: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. + to remember.\n\nYou will receive the current summary and the your latest actions. 
Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], @@ -132,7 +132,7 @@ interactions: Connection: - keep-alive Content-Length: - - '600' + - '599' Content-Type: - application/json method: POST @@ -140,16 +140,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA0SOwU4CMRRF935Fc9cd0hlQsDviyo3sXGAMKZ0nrc70lekDjGT+3ZCgbM/NPTln - xBYWPjjxfe6q+fLUPDVp//K6DqvsV9N1u++WP24/Ox4TNHj7SV6uj4nnPnckkS+TH8gJtbD1w6JZ - zMy8MRo9t9TBYpelmk7uKzkMW67M1NTQOBS3I9gz8sB9lo3wF6UC+1hr3NQ3rCEsrvsHtTGjhg8c - PRXYtzN6Kn/OgTuChSslFnFJLoWchNKl/lmdXFHXYuVSqxJLiGmnEp1UcEUFlzMlaicYNT5iiiVs - BnKFEyyKcIZGTC19w5rxfbz7BQAA//8DALvrL2VJAQAA + H4sIAAAAAAAAA0SOQUvDQBBG7/6K8J03JUlRmz16KHrQgwiKImW7mTZbk5k1O6GlJf9dClWvD97j + nRAaWPjWqe9jl9/evX7XI78f54/j8uXpfpPqt+fdsjru6oOHgax35PVizLz0sSMNwjDwAzmlBra8 + WVT1oqqK0qCXhjpYbKPm89l1ruOwlryYFyUMxuS2BHtCHKSPulL5Ik6wdWXwn/7HBirquj9QFuVk + 4FsJnhLsxwk9pd/mIB3BwqUUkjrW86GwEp/vH7K9S9nlOHPcZCzaBt5mTPusdSlrXYzE1MwwGWwC + h9SuBnJJGBZJJcIgcEMH2GL6nK5+AAAA//8DABCIDMBJAQAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2a24dfa796e4-SJC + - 7c0ba8446a3e16a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -159,7 +159,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:20 GMT + - Mon, 01 May 2023 23:03:22 GMT Server: - cloudflare access-control-allow-origin: @@ -171,7 +171,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '572' + - '905' openai-version: - '2020-10-01' strict-transport-security: @@ -189,7 +189,7 @@ interactions: x-ratelimit-reset-tokens: - 88ms x-request-id: - - 396e6db6eeae90fd3920af59c662d881 + - 904792458bdfabc0ed4f7fe647a44b3a status: code: 200 message: OK @@ -207,15 +207,15 @@ interactions: help you remember.\n3. No user assistance\n4. 
Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. Internet access for searches and information gathering.\n2. Long Term memory management.\n3. 
GPT-3.5 powered @@ -232,7 +232,7 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nI was created and nothing new has happened."}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", @@ -248,7 +248,7 @@ interactions: {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "temperature": - 0, "max_tokens": 2552}' + 0, "max_tokens": 2549}' headers: Accept: - '*/*' @@ -257,7 +257,7 @@ interactions: Connection: - keep-alive Content-Length: - - '4438' + - '4447' Content-Type: - application/json method: POST @@ -265,20 +265,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekarq03c2tggv3IoQIqmYnTjJ04gkzTj+o8t9Rmna7myLg - 6o/Xj+33hCbHFHWlRNeNjZer/ezDqljtdh8XTz/X/HW3Pc53u1l5/LJfY4Tu+QdpuXRMtKsbS2Ic - Y4TakxLKMU0Wj7PH99PlbBph7XKymGLZSPwwmcfS+mcXTx+mCUbYBlUSpidsvKsb2YjbEgdMnxZJ - hDftl3gyX0QoTpS9hZJk2UWoK2c0BUy/nbCmcNX1zhKmqEIwQRRLT+lYiPsNThkDAGQolWvLSkKG - KVyClwQdpA9m+AmYKAdx4EnlIBWB4SC+1T1g2MwmchAojCVog+HyXNGXbs4x7epacT7JMHqt70kF - x4bLYcj6L6o9tjIczsJMB4FAAq540wBSKYEba+Gsdfvx1MYqHgbG8DnQn1H/Z9Us4xhWrOzxF93V - QS+Sk5CvDdMrbKEmjIi0N2K0CfX42MSh9XRdq+WcfP/Heyq4uIXsEZ6pcJ6g8U4T5f0z9kYqUHwE - UWEbxvcIDantdfLeWPvvH/P5qoNIF119dDndnY1Y1TTov5x5hKB8OXbfkOhrb+33OFeIM8iFJ+MO - uwgLwyZUm8FjmGIQ12CEhnM6YDrtvnfvfgMAAP//AwC356gC/QMAAA== + 
H4sIAAAAAAAAA4ySTW/bMAyG7/sVAs9OkDhp1/rW3YqdBnQosHkIFJmJ1ciUJ9JN3MD/fZCdjyLp + sF358fLhS+7BFpCBKbWYqnajz1+efz+0/PV1OZu9LF+/Vc189rbc8Pzpx3MLCfjlCxo5dIyNr2qH + Yj1BAiagFiwgm97epfd3aTpJE6h8gQ4yWNcymo1vRtKEpR9NZpMpJNCwXiNke6iDr2pZiN8gMWT3 + t/MEztqn+PTmLgHxot05NE3TLgFTemuQIfu5hwr5qBu8Q8hAM1sWTRIpPQlS3GCfk1JK5SClb9al + cA6ZOgQPCdxJDObwVKIi3ImyxBIaE7GUZSVeBdSFkhLfp3iRjmUnamUdqoYtrfuKWLroY8ZXlaZi + nEPyfmBAzZ4src9T/6bq7AZdq+I62hIrXRQ2FmmnLK18qHTPKKWWCEpooi2hjcgHa7GHEs0bvgSp + naaBYaS+M35M/z/b5zmN1ANp177hVZ2KIgUKhsrSkO5NZsGaL4hMsGKN5WrAelSE2BMgcRNwWPRR + NVRgiLe+pjqt7Vq1xJUPqOrgDWIR77O1UipN7cd+cI16c5y8tc79++zkt+McBpEuOf7awbqrVyNd + 4aB/svkCQYf15YcOiVh7br/GOUL0IAeenDroElhZslwuhreDDFh8DQlYKnAH2aT71X36AwAA//8D + AMDPrbAhBAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2a28ea2b96e4-SJC + - 7c0ba84ab86f16a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -288,7 +289,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:29 GMT + - Mon, 01 May 2023 23:03:30 GMT Server: - cloudflare access-control-allow-origin: @@ -300,7 +301,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '9064' + - '8196' openai-version: - '2020-10-01' strict-transport-security: @@ -312,13 +313,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86466' + - '86467' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.356s + - 2.355s x-request-id: - - bb4cad658d2a5f381ad0c0621cb834cd + - 1d952b657e5c0345769483734723e74f status: code: 200 message: OK @@ -326,7 +327,7 @@ interactions: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. 
+ to remember.\n\nYou will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these events from your past: @@ -340,7 +341,7 @@ interactions: Connection: - keep-alive Content-Length: - - '714' + - '713' Content-Type: - application/json method: POST @@ -348,16 +349,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA0SPy0oDQRBF935FU+ueMI88TO9CFj4QxJWCSOj0VDJtZqraqQpxCPPvElGyPXAP - 554h1uAgNF5Dl9pssTqVz+XT3fx+/vb4snyl4WvYHday3i58CRZ4+4lB/xaTwF1qUSMTWAg9esUa - XDG/LW+n+aIqLXRcYwsO9kmzajLL9NhvOcurvAALR/F7BHeG1HOXdKN8QBJwRbG0cHVf+cyCsvr2 - SqrpaCE0HAMKuPczdCj/1p5bBAdeJIp60ksjkyJd+ldivJFBFDtrHkyPwbet0carIdYm0t4Qnkzj - xTQ+JSSsjUQKaLrB/F6NTBMYLewiRWk2PXphAgeinMBCpBq/weXjx3jzAwAA//8DADd8MExmAQAA + H4sIAAAAAAAAA0SPPUsDQRRFe3/FcOvZsB9Ek+m0CxZaiBYiYZx9ZibZ+ci+F2II+98loqQ9cA/n + nhF6GDhvxcUyVHcPb/vHg39d7rbt8liv9ifuntyzoxd77KGRP7fk5G8xczmWgSTkBA03khXqYZrb + RbtctG3TasTc0wCDTZGqm80rOYyfuaq7uoHGge2GYM4oY45F1pJ3lBimaWuNq/vK5xqSxQ5X0s0n + DedzcMQw72dE4n/rmAeCgWUOLDbJpTEnoXTpv2dlFZ9YKGq1UiM5OwxKvBWVsviQNirRUXnLyttS + KFGvOCRHKp7U79WQ0wyTxldIgf16JMs5wYAlF2iE1NM3TD19TDc/AAAA//8DAHxrB+lmAQAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2a713f7b96e4-SJC + - 7c0ba88e2e6416a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -367,7 +368,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:33 GMT + - Mon, 01 May 2023 23:03:33 GMT Server: - cloudflare access-control-allow-origin: @@ -379,7 +380,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '1006' + - '948' openai-version: - '2020-10-01' strict-transport-security: @@ -397,7 +398,7 @@ interactions: x-ratelimit-reset-tokens: - 107ms x-request-id: - - a2e25fb58e91f62bfd8851675102b189 + - 
61597fd793784b1c948c5882e2444b63 status: code: 200 message: OK @@ -415,15 +416,15 @@ interactions: help you remember.\n3. No user assistance\n4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. Internet access for searches and information gathering.\n2. Long Term memory management.\n3. 
GPT-3.5 powered @@ -440,7 +441,7 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nAs a system, I recall that nothing new has happened since my creation."}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, @@ -457,18 +458,18 @@ interactions: "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_2.txt file using the - read_file command.\",\n \"reasoning\": \"The instructions_2.txt file - contains the next set of instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze - the instructions and determine the next steps\",\n \"criticism\": \"I - need to ensure that I understand the instructions completely before proceeding + {\n \"text\": \"The next instruction is to read the instructions_2.txt + file using the read_file command.\",\n \"reasoning\": \"The instructions_2.txt + file likely contains additional information that is necessary to complete the + tasks.\",\n \"plan\": \"- Use the read_file command to read the instructions_2.txt + file\\n- Analyze the instructions and determine the next steps\",\n \"criticism\": + \"I need to ensure that I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt file now.\"\n },\n \"command\": 
{\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 2344}' + using the format specified above:"}], "temperature": 0, "max_tokens": 2339}' headers: Accept: - '*/*' @@ -477,7 +478,7 @@ interactions: Connection: - keep-alive Content-Length: - - '5444' + - '5489' Content-Type: - application/json method: POST @@ -485,20 +486,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekapvdbpTbSiAoEogDaIUIqqYTNxk68UQzDm2p+t9RmpQu - KQKu/nj92H6PaArMUFdKdN3Y+OFxN/9w16Tl07vPb16/eru0T1wu0+/v03T6EiN062+kZeiYaFc3 - lsQ4xgi1JyVUYDZbpPP0bvqQJBHWriCLGZaNxMnkPpbWr108TaYzjLANqiTMjth4VzeyErclDpjN - ZotFhFfxa+J+EaE4UfYaSubzU4S6ckZTwOzLEWsKF2HvLGGGKgQTRLF0mI6FuFvhmDMAQI5Subas - JOSYwRAcErSXLpjjEpioAHHgSRUgFYHhIL7VHWBYJRPZC2yMJWiD4fJc0ZWuzjHt6lpxMckxeq7v - SQXHhst+yMe/qHbYynA4CzPtBQIJuM1vDSCVEriybpy1bjee2ljF/cAYPgX6M+r/rJrnHMMjK3v4 - QTd10IkUJORrw/QMW6gJIyLtjRhtQj0+NnFoPV3Warkg3/3xlgoGt5A9wJo2zhM03mmionvGzkgF - ig8gKmzD+B6hIbW9TN4Za//9Yz5ftRc5RRcfDae7sRGrmnr9X2ceIShfjt3XJ7raa/stzgXiDDLw - 5HzCU4QbwyZUq95jmGEQ12CEhgvaYzY9fT29+AkAAP//AwAwBdt+/gMAAA== + H4sIAAAAAAAAA4ySTW/bMAyG7/sVAs92UCdomvrWnpptp2HFMMxDoMiMrVmiPIlekgX574NspymS + DtuVHy8fvuQBdAk5qFqysq1J7x6//Pz4qJ9/ET+9/zr9xNbPm/Vy/qF52jxAAm79AxWPHRPlbGuQ + tSNIQHmUjCXk2XwxvV9Mp9ksAetKNJBD1XI6m9ym3Pm1S29mNxkk0AVZIeQHaL2zLa/YNUgB8iy7 + yxI4i58Tt4sE2LE059Bsen9MQNVOKwyQfzuAxXAS9s4g5CBD0IElccR0xEhxhUNBQghRANeuq2oO + BeRiDI4J3HEMFvC5RkG4Y6EpsO9UxBI6CHbCoywF1/g6FVazCe9YbLRB0QVNVV8RS1d9TDlrJZWT + ApLXAz3K4EhTdZ76N1WjGzR7EdeRmoKQZaljkTRC08Z5K3tGriVHUEIVbfH7iDxaiz0Uy9CES5DW + SBoYUvEc8G36/9m+KCgVDyTN/jde1YkoUiKjt5qGdG9yYGzDBZHymrXSwQ5YS0GIPQFS6DwOiy5F + RyX6eOtrqpe1zV6sceM8itY7hVjG+2w110LS/m0/QouyOU3eamP+fXZy20kBg8gxOf3aaN3Vq5G0 + 
OOi/2HyBIH11+aFDItae269xThA9yMhT0BGOCWw06VCvhreDHAK7FhLQVOIO8pvj9+O7PwAAAP// + AwBEf5dDIgQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2a789b7296e4-SJC + - 7c0ba894ed8e16a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -508,7 +510,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:42 GMT + - Mon, 01 May 2023 23:03:42 GMT Server: - cloudflare access-control-allow-origin: @@ -520,7 +522,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '9322' + - '8779' openai-version: - '2020-10-01' strict-transport-security: @@ -532,13 +534,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86458' + - '86452' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.361s + - 2.365s x-request-id: - - bfa86d8ef9949f22b8e710ac50b2fcf7 + - fee70e84f36b122cc0aef0db455f5eda status: code: 200 message: OK @@ -546,7 +548,7 @@ interactions: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. + to remember.\n\nYou will receive the current summary and the your latest actions. 
Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these events from your past: @@ -561,7 +563,7 @@ interactions: Connection: - keep-alive Content-Length: - - '741' + - '740' Content-Type: - application/json method: POST @@ -569,17 +571,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA1SPTWvDMBBE7/0VYs+ysRO7cXULbQ+55FYoKaXI8iZSa2uFd0Nqgv97ST8IvT5m - hjdnCB0YcN6KG1KfrdanRVvuHp52zpWb+8dqW09lU2zaffM8gQZq39HJbyN3NKQeJVAEDW5EK9iB - KW+bRVMVq6rWMFCHPRg4JMmWeZ3JcWwpK5ZFCRqObA8I5gxppCHJm9AHRgZTLmoN1+0rv9MgJLa/ - kqqaNThPwSGDeTnDgPy3OlKPYMAyBxYb5eJIUTBe/NesrOKJBQetNmpEZ/teibeiIokP8aAinpS3 - rLxNCSN2ikN0qIZJfV8NFHO1/Zf9yeUwa9iHGNi/jWiZIhhgoQQaQuzwE0wxv843XwAAAP//AwBZ - 1nVcfAEAAA== + H4sIAAAAAAAAA0SPy07DMBBF93yFNWunStKmLd7BAhUJseUlVLnOtDG1PSYzLURV/h0Vgbo90j06 + 9wS+BQOus+JiDsXi9unzeTtrs22Oq7vSD48vr4tjGfYPdjWABtp8oJO/xcRRzAHFUwINrkcr2IKp + 5sv6elnXdaMhUosBDOyyFNNJU8ih31BRTssKNBzY7hDMCXJPMctaaI+JwVT1XMPFfeGNBiGx4UJm + 1ajBdeQdMpi3E0Tkf2tPAcGAZfYsNsm5kZJgOvffsLKKBxaMWt2rHp0NQUlnRSWSzqedSvilOsuq + szljwlaxTw5VHNTvVU9pAqOGrU+eu3WPlimBARbKoMGnFr/BlOP7ePUDAAD//wMASuwubmYBAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2ac2bb0b96e4-SJC + - 7c0ba8dc5ff216a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -589,7 +590,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:46 GMT + - Mon, 01 May 2023 23:03:46 GMT Server: - cloudflare access-control-allow-origin: @@ -601,7 +602,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '1446' + - '925' openai-version: - '2020-10-01' strict-transport-security: @@ -619,7 +620,7 @@ interactions: x-ratelimit-reset-tokens: - 112ms x-request-id: - - 74efe1e7ce809342ac1cb7dd57f90235 + - a74d4954eeca5e3fb830e104f9527193 status: code: 200 message: OK @@ -637,15 +638,15 @@ 
interactions: help you remember.\n3. No user assistance\n4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. Internet access for searches and information gathering.\n2. Long Term memory management.\n3. 
GPT-3.5 powered @@ -662,11 +663,11 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nAs a system, I recall that nothing - new has happened since my creation. Nothing new happened."}, {"role": "user", - "content": "Determine which next command to use, and respond using the format - specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading the instructions_1.txt file using the read_file command.\",\n \"reasoning\": \"The first step is to understand the instructions and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file @@ -679,31 +680,31 @@ interactions: "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_2.txt file using the - read_file command.\",\n \"reasoning\": \"The instructions_2.txt file - contains the next set of instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze - the instructions and determine the next steps\",\n \"criticism\": \"I - need to ensure that I understand the instructions completely before proceeding + {\n \"text\": \"The next 
instruction is to read the instructions_2.txt + file using the read_file command.\",\n \"reasoning\": \"The instructions_2.txt + file likely contains additional information that is necessary to complete the + tasks.\",\n \"plan\": \"- Use the read_file command to read the instructions_2.txt + file\\n- Analyze the instructions and determine the next steps\",\n \"criticism\": + \"I need to ensure that I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_3.txt file using the - read_file command.\",\n \"reasoning\": \"The instructions_3.txt file - contains the next set of instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze - the instructions and determine the next steps\",\n \"criticism\": \"I - need to ensure that I understand the instructions completely before proceeding + {\n \"text\": \"The next instruction is to read the instructions_3.txt + file using the read_file command.\",\n \"reasoning\": \"The instructions_3.txt + file likely contains additional information that is necessary to complete the + tasks.\",\n \"plan\": \"- Use the read_file command to read the instructions_3.txt + file\\n- Analyze the instructions and determine the next steps\",\n \"criticism\": + \"I need to ensure that I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n 
\"args\": {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Write the task_id into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2132}' + "temperature": 0, "max_tokens": 2125}' headers: Accept: - '*/*' @@ -712,7 +713,7 @@ interactions: Connection: - keep-alive Content-Length: - - '6465' + - '6524' Content-Type: - application/json method: POST @@ -720,20 +721,21 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5RSy27bMBC89yuIPVOCbfkR6FYUaBv0UBRNL60CgaE2EmNpVyBXsAND/17IsqFE - RQ++znBnZpdzAldACrYyYpu2jnYfDyu7+FF+esHv3efq6+vvB3o9/qyKw5fuG2jgpxe0cpmILTdt - jeKYQIP1aAQLSJfbu9XderFbbzU0XGANKZStREm8iaTzTxwtksUSNHTBlAjpCVrPTSu58B4pQLpM - domGSXwitisNwmLqCdokm16DrdhZDJD+OUGD4SrsuUZIwYTgghiSISaTIA0rnDJSSqkMpOKurCRk - kKoLeCHwKAOYwb0ixEIJq4N3gkoqVGLCPneFciR8BriTtpNYjqKeXY2qC47KM3MeyoXzM265aQwV - cQb6rZtHE5gclaPlQ4XKURDf2eEIIU8m5SuOhWrwtlRz17Y2NBpG6lfA/8e9zWbmYr0TZ11o5udE - Cp0fJI2o+zf6lr1HKzevE1o0+6vJwdX1LR9GfIgzGOV6fa3HZf9/2kGmwdHp3b1mgYwv58UaieHt - JDFFeTc/7+EqWa6vCc8pL2Ez6qHX8OzIhSofmwQpBOEWNDgq8Ajpon/sP/wFAAD//wMAK3Nw1/ID - AAA= + H4sIAAAAAAAAA5SSy47TQBBF93xFqdZOlDgkk/GSHUIIFjOQER5FnXbZbmJ3me4yySjyvyM/gidG + CLGtx73nluqCJsEIda5El1Uxu3v39cfTYfNxeZ/stutTvtMPu0/n44enz49fUgyQD99Jy7Ax11xW + BYlhiwFqR0oowWi52Yb32zAMNwGWnFCBEWaVzFbz9Uxqd+DZYrVYYoC1VxlhdMHKcVnJXvhI1mO0 + XN1tAhzFx8YmDFBYVDGW1qttE6DO2WjyGH27YEn+Kuy4IIxQeW+8KCstJlsh20a4xBYAIEbJuc5y + 8TFGMBSHBp2lLcb4kBNYOgsY68XVusUC40EYTs4IgeQEovxxbxIwVrgrpKYg4FqqWuZyFqi9sVnX + 6Zb2wvtuRHNZKpvMYwxe2ztSnq2x2cjw28KDKSt2bSgwNmVXqo5JciVgiZKO7UDg1U9KIGUHaS21 + I3CUkiOraWpXFcr2TjN49PR3zn+kfhW43Zq4aGfEaOPL3up9B9sqkvUtXhdAbqO2ZkIWNDtHWooX + 6Ciukx2b8UPUynFFrniZxvMVqePV9GSK4j8ygOXTPMZergmujzPc44+/saqk3unmfhMg5bLpy/WN + dnaUGFFu9qcfGq6Wb6+EHeUAG9sGmwBTY43P9/1LYYReuMIAjU3ojNGieW7e/AIAAP//AwDTX575 + DAQAAA== headers: 
CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2acc48b496e4-SJC + - 7c0ba8e29de016a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -743,7 +745,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:45:56 GMT + - Mon, 01 May 2023 23:03:55 GMT Server: - cloudflare access-control-allow-origin: @@ -755,7 +757,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '9570' + - '9356' openai-version: - '2020-10-01' strict-transport-security: @@ -767,13 +769,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86451' + - '86443' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.366s + - 2.371s x-request-id: - - adf37a7bcd126ffa2323b9436545abfc + - 5f590ec504889866cee5d378c1c56737 status: code: 200 message: OK @@ -781,13 +783,13 @@ interactions: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. + to remember.\n\nYou will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these events from your past: - \\nAs a system, I recall that nothing new has happened since my creation. 
Nothing - new happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": null}' + \\nAs a system, I recall that nothing new has happened since my creation.''}\n\"\"\"\n\nLatest + Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], "temperature": 0, + "max_tokens": null}' headers: Accept: - '*/*' @@ -796,7 +798,7 @@ interactions: Connection: - keep-alive Content-Length: - - '763' + - '740' Content-Type: - application/json method: POST @@ -804,17 +806,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA1SPTUvDQBRF9/6K4a0nIf1OZ9eFQhHFjRYUKdPJMxmdzBvzXmlryX+XaqW4Pdx7 - OfcIvgIDrrHi2hSy2WI3pGp68/xYr1ZPn/u7r22dHpbXm9ltmIIG2ryjk3Mjd9SmgOIpggbXoRWs - wAym5bAcF7NJqaGlCgMYqJNko3ySybbbUFaMigFo2LKtEcwRUkdtkrXQB0YGMxjONVy2L3yuQUhs - uJBx2WtwDXmHDOblCC3y32pHAcGAZfYsNsrJkaJgPPkvWFnFBxZstVqqDp0NQUljRUWSxsdaRdyp - xrJqbEoYsVLso0PVHtTPVU8xV/f/sr+5HHoNbz56btYdWqYIBlgogQYfK9yDKfrX/uobAAD//wMA - lmqfw3wBAAA= + H4sIAAAAAAAAA0SPy07DMBBF93yFNWunStP0gXfACqkbYFEkhCrXGRpTZ8ZkppRS5d9REajbI92j + c08QG3AQWq+hy6mY364+dmV++v6sH1ItxOmwnN89h+WqfpyCBd68Y9C/xShwlxNqZAILoUev2IAb + zxbV9aKqJgsLHTeYwME2azEZTQvd9xsuykk5Bgt78VsEd4Lcc5d1rbxDEnDjambh4r7wqQVl9elC + 6vFgIbQcAwq4lxN0KP/WnhOCAy8SRT3puZFJkc79N2K8kaModtbcmx6DT8lo69UQaxtpawgPpvVi + Wp8zEjZGIgU03dH8Xo1MIxgsvEWK0q579MIEDkQ5g4VIDX6BK4fX4eoHAAD//wMAp9wyGmYBAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2b15e8cd96e4-SJC + - 7c0ba92baa3a16a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -824,7 +825,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:46:00 GMT + - Mon, 01 May 2023 23:03:59 GMT Server: - cloudflare access-control-allow-origin: @@ -836,7 +837,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '1442' + - '958' openai-version: - '2020-10-01' strict-transport-security: @@ -848,13 +849,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' 
x-ratelimit-remaining-tokens: - - '89826' + - '89832' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 116ms + - 112ms x-request-id: - - 6536a4bd33511e252d655771f59e71f4 + - 9fe1a228b9592784a0e00ed11fb84d69 status: code: 200 message: OK @@ -872,15 +873,15 @@ interactions: help you remember.\n3. No user assistance\n4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. Internet access for searches and information gathering.\n2. Long Term memory management.\n3. 
GPT-3.5 powered @@ -897,11 +898,11 @@ interactions: to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nAs a system, I recall that nothing - new has happened since my creation. Nothing new happened."}, {"role": "user", - "content": "Determine which next command to use, and respond using the format - specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading the instructions_1.txt file using the read_file command.\",\n \"reasoning\": \"The first step is to understand the instructions and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file @@ -914,24 +915,24 @@ interactions: "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_2.txt file using the - read_file command.\",\n \"reasoning\": \"The instructions_2.txt file - contains the next set of instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze - the instructions and determine the next steps\",\n \"criticism\": \"I - need to ensure that I understand the instructions completely before proceeding + {\n \"text\": \"The next 
instruction is to read the instructions_2.txt + file using the read_file command.\",\n \"reasoning\": \"The instructions_2.txt + file likely contains additional information that is necessary to complete the + tasks.\",\n \"plan\": \"- Use the read_file command to read the instructions_2.txt + file\\n- Analyze the instructions and determine the next steps\",\n \"criticism\": + \"I need to ensure that I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to read the instructions_3.txt file using the - read_file command.\",\n \"reasoning\": \"The instructions_3.txt file - contains the next set of instructions that I need to follow.\",\n \"plan\": - \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze - the instructions and determine the next steps\",\n \"criticism\": \"I - need to ensure that I understand the instructions completely before proceeding + {\n \"text\": \"The next instruction is to read the instructions_3.txt + file using the read_file command.\",\n \"reasoning\": \"The instructions_3.txt + file likely contains additional information that is necessary to complete the + tasks.\",\n \"plan\": \"- Use the read_file command to read the instructions_3.txt + file\\n- Analyze the instructions and determine the next steps\",\n \"criticism\": + \"I need to ensure that I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n 
\"args\": {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, @@ -939,17 +940,18 @@ interactions: into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to write the task_id into the output.txt file using the write_to_file - command.\",\n \"reasoning\": \"The instructions_3.txt file instructed - me to write the task_id into the output.txt file.\",\n \"plan\": \"- - Use the write_to_file command to write the task_id into the output.txt file\",\n \"criticism\": - \"I need to ensure that I write the correct task_id into the output.txt file.\",\n \"speak\": - \"I will write the task_id into the output.txt file now.\"\n },\n \"command\": - {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": - \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, {"role": - "system", "content": "Command write_to_file returned: File written to successfully."}, - {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 1919}' + \"The next instruction is to write the task_id into the file output.txt using + the write_to_file command.\",\n \"reasoning\": \"The task_id is important + information that needs to be saved for future reference.\",\n \"plan\": + \"- Use the write_to_file command to write the task_id into the output.txt file\",\n \"criticism\": + \"I need to ensure that the task_id is written correctly and that the file is + saved properly.\",\n \"speak\": \"I will write the task_id into the output.txt + file now.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\": + {\n \"filename\": \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to 
use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 1912}' headers: Accept: - '*/*' @@ -958,7 +960,7 @@ interactions: Connection: - keep-alive Content-Length: - - '7433' + - '7518' Content-Type: - application/json method: POST @@ -966,20 +968,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA4SSQW/bMAyF7/sVAs9O4DRt3PoWbJegPQzotss8BLLM2GpkyjXppl3g/z44tpM0 - 67ArKb73PVJ7sBnEYAotpqzcJFrurp6/fK5+L+4e0vT6frFavTx+ffi2/HH/eAcB+PQJjQwTU+PL - yqFYTxCAqVELZhDPFrdXt9dhtAgDKH2GDmLIK5nMpzcTaerUT8J5OIMAGtY5QryHqvZlJWvxWySG - eHYTRQGcxE+NeRiAeNHuVIrCqA3AFN4aZIh/7qFEHoVr7xBi0MyWRZN0mJ4EqYuwT0gppRKQwjd5 - IZxArIbi0MBX6YoJrFShX1ANTJgp7ZySApVo3rLSlClCzJR4xUUjmd+RathSfny0Hmc7kVJTNk0g - ODerUbMnS3nvuBwMLLHUjen2wD1Eikhq453zuw6EsqOHsnxEvJSvnKZeeaK+M/6b612G7hW/sWB5 - IWdqK9ZYLsf9jPGRuKk7eS3qP1tLceNrPJhJt6rO8RKbK9Tb0WNnnfuITZHfTRPo59pgvOsQ6K+z - ki6xl3y3gAtnXeeXP+L8UGdXOoQ5heTGGGTeNM69HakOZANgQi20AWwsWS7WvRzEwOIrCMBShq8Q - h+2v9tMfAAAA//8DAKnylkSfAwAA + H4sIAAAAAAAAA3ySy47TQBBF93xFq9adKA+GZLwDsRlWiAEhgVHUaVfsJv2iqzx5yf+OYjvEZAa2 + 1V33nrpVJzAFZKArxdpFO1q8+/rLOj2dP71PH+OH4yHteRc/fTsenXsECWH9EzX3HWMdXLTIJniQ + oBMqxgKy6Zvl7H45m83vJbhQoIUMysij+fhuxHVah9FkPpmChJpUiZCdIKbgIq84bNETZNO75UTC + Vfz68HohgQMrey0tZotGgq6C0UiQfT+BQ7oIp2ARMlBEhlh5PmMGz+jPI5xyL4QQOXAV6rJiyiET + fbF/wD2fizl8rjChUAmFD2JTJ64wCeOJU63PiCSMF1yh2BiLNBYPgqpQ20JQVbMowq57pQMxOlGT + 8WVbYEXbVT8pCh2cU74Y5yCHGAkVBW982bE8Gq/x3P1/Ijl0rBSJi00hDFPrLJQvLqBrvLLeAkSr + fOc9El8I/00uOLw48Y2eToaNNuQ60QfhEdte9FSns77iIb2hgWpMIWKyhxa+/ams7XJv4yD1hM8i + pIhqe3HbGWtfXowPu3EOXWMjL/fRD/fsPLxy2Gn+FcaNtUrl7WUN19oJvLW2TXS4Jaq1RqJNbe3h + D1VL1gPmvoFGwsZ4Q9Wqk4MMiEMECcYXuIds0vxoXv0GAAD//wMALFWP5ucDAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe2b1f5eef96e4-SJC + - 7c0ba932296616a2-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -989,7 +991,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:46:08 GMT + - Mon, 01 May 2023 23:04:07 GMT Server: - 
cloudflare access-control-allow-origin: @@ -1001,7 +1003,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '7793' + - '8287' openai-version: - '2020-10-01' strict-transport-security: @@ -1013,13 +1015,13 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86458' + - '86445' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.36s + - 2.37s x-request-id: - - 1fc1636b39f8812550599c1474d1d565 + - ead9c9628b3e17b7f53b848fa1361338 status: code: 200 message: OK diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml deleted file mode 100644 index 904ad7e9..00000000 --- a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml +++ /dev/null @@ -1,771 +0,0 @@ -interactions: -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file - using the read_file method and follow the instructions in the file.\nYour decisions - must always be made independently without seeking user assistance. Play to your - strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. - Use the command read_file to read the instructions_1.txt file\n2. Follow the - instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word - limit for short term memory. Your short term memory is short, so immediately - save important information to files.\n2. If you are unsure how you previously - did something or want to recall past events, thinking about similar events will - help you remember.\n3. No user assistance\n4. Exclusively use the commands listed - in double quotes e.g. \"command name\"\n\nCommands:\n1. 
append_to_file: Append - to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine - which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2766}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3294' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA4RSS2/aQBC+91eM5mwjCI8g31B6yaFSpTaHtq7Qsh7sLevd1c5YQBH/vTK2SQRR - ev3m8T1mTmgKzFBXSnQdbPq4avy3xY/xbLY6fnkKT+rrvvn8aJYvP3flAhP0mz+kpZ8YaV8HS2K8 - wwR1JCVUYDZZLB+W08X4YZZg7QuymGEZJJ2O5qk0cePT8XQ8wQQbViVhdsIQfR1kLX5HjjF7nE4S - fN19xSfzWYLiRdkrtFzOzwnqyhtNjNmvE9bEw9roLWGGitmwKCetSO+EXGvglDsAgByl8k1ZCeeY - QQ/2BTpIC+b4DFz5xhbAoqLA5giRVGFcCVIRGMcSG91K5fVkJAeBrbEEDQ8dbff6gmlf18oVoxyT - t1SRFHtnXNnxfa8ItiayAAsFMAzioXEFxdZHcccKAyiKdwxSKQFHVLRjG4JAcetjTXe0wSrXMabw - wvS+1nZJC37kNc9dCiun7PEvva+uIKFYG9eVHR06b3yjSEcjRhuuh+AHG+S4idRZe/4wi/5xyB5h - Q1sfCUL0muhyr72RCpQ7dknd5sGB1G5g3htr/3/wi/1Rjt2aczJ8VR/e3VM5VVPHcA36RoSK5e0v - doW293X8/hKDiIuQXk/uznhOcGuc4WrdvRlmyOIDJmhcQQfMxuff50//AAAA//8DACSqIf4KBAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfdb77eec9416f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - 
Sun, 30 Apr 2023 06:27:12 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '8377' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86494' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.337s - x-request-id: - - 75084ec318eb8fbda968c3f79a242ee3 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and information results - in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. 
- Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI - was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": null}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '600' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SOMU/DMBQGd36F9c1OlBBaWm8wIGCMgAWhyrVfE0PiZ8WvKqjKf0eVCl1PutMd - ETwMXG/FjWkobu/27OsfeXmUw1v7sEtd67rn1/Z+eVOvoMHbT3JyNkrHYxpIAkdouImskIepl6vr - VbOsmoXGyJ4GGHRJiqZcFLKftlxUTVVDY59tRzBHpInHJBvhL4oZZl1rXNIXrCEsdvgHdVXNGq7n - 4CjDvB8xUv5rTjwQDGzOIYuNcjrkKBRP90/qYLM6HysbvYosfYidinRQvc2qtylRJF9i1tiFGHK/ - mchmjjDIwgkaIXr6hqnmj/nqFwAA//8DABWtlxRJAQAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfdb7c3796916f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 06:27:16 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '948' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89866' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 88ms - x-request-id: - - e02c6c27229229ec129826db88fa9f23 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are 
Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file - using the read_file method and follow the instructions in the file.\nYour decisions - must always be made independently without seeking user assistance. Play to your - strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. - Use the command read_file to read the instructions_1.txt file\n2. Follow the - instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word - limit for short term memory. Your short term memory is short, so immediately - save important information to files.\n2. If you are unsure how you previously - did something or want to recall past events, thinking about similar events will - help you remember.\n3. No user assistance\n4. Exclusively use the commands listed - in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append - to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. 
- Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past: \nI was created and nothing new - has happened."}, {"role": "user", "content": "Determine which next command to - use, and respond using the format specified above:"}, {"role": "assistant", - "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading - the instructions_1.txt file using the read_file command.\",\n \"reasoning\": - \"The first step is to understand the instructions and the tasks that need to - be performed.\",\n \"plan\": \"- Use the read_file command to read the - instructions_1.txt file\\n- Analyze the instructions and determine the next - steps\",\n \"criticism\": \"I need to ensure that I understand the instructions - completely before proceeding with any tasks.\",\n \"speak\": \"I will - start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": - \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, - {"role": "system", "content": 
"Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe - current task_id is 
1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead - the file instructions_2.txt using the read_file command."}, {"role": "user", - "content": "Determine which next command to use, and respond using the format - specified above:"}], "temperature": 0, "max_tokens": 1131}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '6479' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA7SSy4rbQBBF9/mKotaSsK2MZ9BusgjxIlnlBVEwbXVJ6rhV3XSXGDvG/x5kyePB - JoQQsr31Ore4BzQaC6xaJVXnbXr/2Dva9x/Uqlrm9bt79ebt6kvt5b3dfv2MCbrND6pkmsgq13lL - YhxjglUgJaSxmC8fFg/5cpYvE+ycJosFNl7SPLtLpQ8bl87y2RwT7KNqCIsD+uA6L2txW+KIxSJ/ - WCR4Wf5cmA+6OFH20nu3fH1MsGqdqShi8e2AHcXz4uAsYYEqRhNFsQyYjoV4sHAoGQCgRGld37QS - SyxgEqcC7WQQS/zYEhiOEvpqIIrrRSY7gdpYgmGjMhxBgTVRwNUgKm4jSKsEmEiDONgQeAq1Cx3p - DFYQW9dbDYqV3f8kkJamKcUaNAmFzvCoM+0EopCPWYnJS8BAKjo23PwvyrP+b5jeKh4JU/gUx+5A - 
Sq8nsq4btok7iafqb1yUJafw+FcsVyhVMGIqE7uR5+KQOPaBxm+soGdNYYiMfnFmSiTZPWyodoHA - B1cRacMNPBlpQfF+7L3+QPSktueTT8baP1o9mbp5e1biuPaYnNM7ve8mvKw6Gi8+v/oKSoXmOvNj - Yei9jN8CniFOIBNPyUc8JlgbNrFdj8HEAqM4jwka1rTDYnb8fnz1CwAA//8DAIjD0lt0BAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfdb7c9ef9316f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 06:27:26 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '10335' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '87376' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 1.748s - x-request-id: - - 0aa171294ea5788272f2d06269765a84 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and information results - in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. 
- Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': - ''system'', ''content'': ''This reminds you of these events from your past: - \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'': - ''you'', ''content'': ''{\"command\": {\"name\": \"read_file\", \"args\": {\"filename\": - \"instructions_1.txt\"}}}''}, {''role'': ''your computer'', ''content'': ''Command - read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe - current task_id is 
1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead - the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature": - 0, "max_tokens": null}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '2983' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA1yPTW/TQBCG7/yK0Zw3lp2UtPUtVUFwAQkhgaDI2non8VLvjtmZTRqi/Hdk4xbU - 42jer+eE3mGNbWe1DUO/uNxkTh/y18S3N7tvYf/l1+fyU/u7/Pj2TX+BBvn+J7U6O4qWw9CTeo5o - sE1klRzW1fpqebValxfXBgM76rHG3aCLVfF6oTnd86JclRUazGJ3hPUJh8Rh0Eb5gaKMAeXS4L/w - 58f62qCy2v4/6WV1Nth27FsSrL+fMJA85SbuCWu0Il7URh1XclSKI8FGwIIcRSkYeA8HKzAjgI0O - Imvn4w4iHaCzAp0dBorkCnjHB9pTMmAFBkpw5Jyg5RBsdOb5GrJSmpyJNKdIDrQjmPsFeAsWtr4n - iDaQgzv0UTTldiSWpir0Ue9wmjL5ckoUFdTKQ+MdeIGqqqoCNs750WL7/ji1Q2f3BE9h5F4sUoZE - 9m/oVP+ieDkXZxnpR9UobybpDFng2eDWRy9dk8gKR6xRlAc06KOjR6zL84/zqz8AAAD//wMA0Kuq - I10CAAA= - headers: - CF-Cache-Status: - 
- DYNAMIC - CF-RAY: - - 7bfdb81dddc516f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 06:27:33 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '4060' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89275' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 483ms - x-request-id: - - 75d0c0f3853f72f74e3fc8b8ef377b60 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file - using the read_file method and follow the instructions in the file.\nYour decisions - must always be made independently without seeking user assistance. Play to your - strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. - Use the command read_file to read the instructions_1.txt file\n2. Follow the - instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word - limit for short term memory. Your short term memory is short, so immediately - save important information to files.\n2. If you are unsure how you previously - did something or want to recall past events, thinking about similar events will - help you remember.\n3. No user assistance\n4. Exclusively use the commands listed - in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append - to file, args: \"filename\": \"\", \"text\": \"\"\n2. 
delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past: \nAs a system, I was created and - nothing new has happened. However, as per your command, your computer has returned - the contents of a file named \"instructions_1.txt\" and the current task_id - is 1111. Additionally, you have instructed your computer to read the file \"instructions_2.txt\" - using the read_file command."}, {"role": "user", "content": "Determine which - next command to use, and respond using the format specified above:"}, {"role": - "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"The instructions_2.txt - file contains a list of tasks that need to be performed. I should analyze the - tasks and determine the next steps.\",\n \"reasoning\": \"The instructions_2.txt - file contains a list of tasks that need to be performed. 
I need to analyze the - tasks and determine the next steps.\",\n \"plan\": \"- Use the read_file - command to read the instructions_2.txt file\\n- Analyze the tasks and determine - the next steps\",\n \"criticism\": \"I need to ensure that I understand - the tasks completely before proceeding with any tasks.\",\n \"speak\": - \"I will read the instructions_2.txt file and analyze the tasks.\"\n },\n \"command\": - {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": - \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": - "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis - task_id is 
2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite - all the task_ids into the file output.txt. The file has not been created yet. 
- After that, use the task_complete command.\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 1071}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '6914' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA8yTQY/TQAyF7/yKkc/TqknVbcmNC9KKAwgJLgRV04mbDJ3YIeO03a3y31GalJQg - DisOcH22nz7behdwGSRgCyO2rPxs/abh4/l4/PhBnt2pfHq/fvpu3z7n9ebzuww08O4bWhkm5pbL - yqM4JtBgazSCGSTRwybeLB8Wq6WGkjP0kEBeyWw5X82kqXc8WywXEWhogskRkgtUNZeVbIUPSAGS - eLWINYzmYyF6rUFYjB+ldRy1GmzBzmKA5MsFSgw345o9QgImBBfEkHSYTILUrXBJSSmlUpCCm7yQ - kEKiBnEo4Fk6MYVHRYiZElan2gkq472SApWYcNi6LChHwldl7zwqbqRqZC5nUYayTifVBBxHhuVQ - WS7LawsrR5mzRrouIz9blQvq1j1PQd/z1WgCk6P8f4asvKGeb6Y+DfZXvq3w9gpyZ/9C8DSl0fQv - mCfItnbirAvl9K5IoakHn0dVmGO/iiC94Ng73HONqgmO8j+DT88YKjSHG8/Jef+vn9zDtfqWomH+ - txCRKbHn/uXrk/VMnU/z1xe63tHi7vX389O4RlEUpSnFcRzfQK+wA3NKLbQa9o5cKLZ9iiCBIFyB - BkcZniFZtF/bVz8AAAD//wMAymrgECAFAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfdb837d91916f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 06:27:45 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '12002' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '87330' - x-ratelimit-reset-requests: - - 17ms - 
x-ratelimit-reset-tokens: - - 1.78s - x-request-id: - - 040a4416b064ecd9461b7457dd71db07 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and information results - in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. - Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': - ''system'', ''content'': ''This reminds you of these events from your past: - \\nAs a system, I was created and nothing new has happened. However, as per - your command, your computer has returned the contents of a file named \"instructions_1.txt\" - and the current task_id is 1111. Additionally, you have instructed your computer - to read the file \"instructions_2.txt\" using the read_file command.''}\n\"\"\"\n\nLatest - Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\": - \"read_file\", \"args\": {\"filename\": \"instructions_2.txt\"}}}''}]\n\"\"\"\n"}], - "temperature": 0, "max_tokens": null}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '1087' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SPTW8TMRCG7/yK0Zyd1W6+iPYW4ECKAFVCXChaOfYkdrv2rDyzSdoo/x0tKvT6 - 6pln3veK0WOLLlh1aehn77fjsP149215/2H/ff3y9XyRftV8Oux2xy8/0SDvH8np60XlOA09aeSM - Bl0hq+Sxbdab+Waxrtcbg4k99djicdDZolrNdCx7ntWLukGDo9gjYXvFoXAatFN+oizYzpva4Jv7 - f75cGlRW27+Rq+XNoAscHQm2v66YSP5ZC/eELVqRKGqzTh05K+Wp/1bAgjyLUjKQWUPMR8h0hmAF - gh0GyuQr+MxnOlExsINgTwSFHMUTebDgOCWbPRwKJ3jmEZShkPWggeAQe4IHjFm0jG5aId280os+ - 
IIwy/ZqoCe/+oq+yCn4EAjeWQllBrTx10UMUEI19D03TNBXeDB5ijhK6QlY4Y4uiPKDBmD1dsK1v - v2/v/gAAAP//AwDW3lkK2gEAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfdb8959bc416f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 06:27:51 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '2478' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89749' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 167ms - x-request-id: - - 5acc9d310e317d6b025800ba63f0cfa6 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file - using the read_file method and follow the instructions in the file.\nYour decisions - must always be made independently without seeking user assistance. Play to your - strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. - Use the command read_file to read the instructions_1.txt file\n2. Follow the - instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word - limit for short term memory. Your short term memory is short, so immediately - save important information to files.\n2. If you are unsure how you previously - did something or want to recall past events, thinking about similar events will - help you remember.\n3. No user assistance\n4. 
Exclusively use the commands listed - in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append - to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: - Delete file, args: \"filename\": \"\"\n3. read_file: Read file, args: - \"filename\": \"\"\n4. search_files: Search Files, args: \"directory\": - \"\"\n5. write_to_file: Write to file, args: \"filename\": \"\", - \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. - get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. get_text_summary: - Get text summary, args: \"url\": \"\", \"question\": \"\"\n9. - list_agents: List GPT Agents, args: () -> str\n10. message_agent: Message GPT - Agent, args: \"key\": \"\", \"message\": \"\"\n11. start_agent: - Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past: \nAs a system, nothing new has happened. - However, I have received a command from you to read the file \"instructions_2.txt\" - using the read_file command. The current task_id is still 1111."}, {"role": - "system", "content": "Command read_file returned: Gqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nThis - task_id is 
2222\nGqh8WYLH0cpnlBwPHjEvMN1UJ3A1IIPKoBAwOUKDtgudwfGf6Cw1gc7DdYqe4EFlERnhU9gclGr0ovsNbOVcZFapXJ5EzFFtyikiUHVpu1s8lVjNoNG0Wvjy08ymnB5zQLrTVA1jYInBuNaVWswjFDJmpQpebQuHaRqepCYlASNIPUAuKXrfzxE1CB51jjrZHBUxmz5F0PkgYTVUKfHf4VtyyOJMlvoKveuvb5inm1mJEbSRDfTcDzE11mfcix3byoTdJDG3VsfKPbbgJXRZUlwoEFqRso42UkIV7rYk6TXxypztqgoaWWLN6eOIE0Gvz2CUvIXKzvyKbZpObRubhTydzW69cRXg7yxIPn2iG00RvQ4Jb6NbVBDfO9XB50oxIRzrIrbAEPzitgGRSCaxcG4kBfPIamOZpF8Egf9iYdn8PBwVtmvtUChyKwcrnyDwcSCMC5f9hPuLWw1UHW3u3X8ee5gxtElyhTEoTlaqVC1V9R86EnWxq7IlwCEwWXjlq6XvjgYoPHteXiQQb9R4eH0c3bFq0TVEh30s5nxfHMIxBiWxilFT9sgvzE8nbqV2wnL7a4DHJKUrV630PniHShfK6Josl0izBCnjovZnemR7dxqSEDbeMlTst18IWiTcQPGRXmP1JUjTvefeTB6f7NVDe2odh0a2tvWYjMiEfvR9qH8YxV6uMffCIUdclfUmK9tSUzhrSyWp8kxsqBd7wwjskKmm0JqB6GBTJqZw52tDIMUpgTK4VVvsnqlLPiN8SRMrTV5xQHpX8y7xSRsVbPdMh5rImkPpgnDZsGvAi5Z52dM7To4Fjhs64K2K59Ye6752AjG8R9he60dLGKcPLtxjuK13zfFpLIUPQOu7SsAoqDHTZf3rTH4SmNIJulZidhpNPu2FbrEiDhfjJUgHMkFfycfcDcysYyujCVD95E5BZvv8oa9X14XqphoVLhgtNABSphgzPVR6sGyZv2AMGClrDtYON1DfRytTlpTS\nWrite - all the task_ids into the file output.txt. The file has not been created yet. 
- After that, use the task_complete command.\n"}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to write all the task_ids into the file output.txt and then use the - task_complete command to indicate that the task is complete.\",\n \"reasoning\": - \"I need to write all the task_ids into the file output.txt and then use the - task_complete command to indicate that the task is complete.\",\n \"plan\": - \"- Use the write_to_file command to write all the task_ids into the file output.txt\\n- - Use the task_complete command to indicate that the task is complete\",\n \"criticism\": - \"I need to ensure that I have written all the task_ids into the file output.txt - before using the task_complete command.\",\n \"speak\": \"I will write - all the task_ids into the file output.txt and then use the task_complete command - to indicate that the task is complete.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"output.txt\",\n \"text\": - \"1111\\n2222\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 1015}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '7053' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA8yST4/aMBDF7/0UozkbBIsWUG6r9kKltifUSk2FHGdIXJxxZI8LEsp3ryDJLkvV - 0/bQ6/x57zdPc0ZbYoam1mKa1k1WT6n98FHS9ktK3z69r2yxXRZV+lqFpg6o0Bc/yciwMTW+aR2J - 9YwKTSAtVGI2X64f1ovlbDVX2PiSHGZYtTJZTB8nkkLhJ7PFbI4KU9QVYXbGNvimlZ34A3HE7OFx - 
MVP4Iv7cmK9XCsWLdi+zq/mqU2hqbw1FzL6fsaE4CgfvCDPUMdoomuWC6VmILyeccwYAyFFqn6pa - Yo4ZDMWhQSe5FHP87I8gtRbYQK1/ERyDFSEG7RxITSA6Hna2jGBZ/LWyt47AJ2mTTOUkCjZgNEOK - 9LIwnEhgfNNoLkE8WC6t0UK93TgKNsI4Pc1R3VIG0tGz5er/R22d5p5yAtu3y9+pm2DFGhub3mID - THRVIo4p0NtSgYL2PhCkaLn6O/j9xbElfRh5jta5fxRr79Gp8YeH/T9emHVDvf0rvztKHar777/9 - rV7g6RLVGNM1xIKIn5MUf5PWCHiFHFhz7rBTuLdsY73rlTHDKL5FhZZLOmE26350734DAAD//wMA - A50m55YEAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfdb8a62cc116f9-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 06:28:01 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '10004' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '87360' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 1.76s - x-request-id: - - 357739c33b65d5f85a7b5ef081939e92 - status: - code: 200 - message: OK -version: 1 diff --git a/tests/integration/challenges/memory/test_memory_challenge_b.py b/tests/integration/challenges/memory/test_memory_challenge_b.py index c98bbe9c..628b4989 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_b.py +++ b/tests/integration/challenges/memory/test_memory_challenge_b.py @@ -6,7 +6,7 @@ from tests.integration.agent_utils import run_interaction_loop from tests.integration.challenges.utils import generate_noise, get_level_to_run from tests.utils import requires_api_key -LEVEL_CURRENTLY_BEATEN = 2 +LEVEL_CURRENTLY_BEATEN = None MAX_LEVEL = 5 NOISE = 1000 @@ -24,7 +24,6 @@ def 
test_memory_challenge_b( memory_management_agent (Agent) user_selected_level (int) """ - current_level = get_level_to_run( user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL ) @@ -32,7 +31,7 @@ def test_memory_challenge_b( create_instructions_files(memory_management_agent, current_level, task_ids) try: - run_interaction_loop(memory_management_agent, 40) + run_interaction_loop(memory_management_agent, 60) except SystemExit: file_path = str(memory_management_agent.workspace.get_path("output.txt")) content = read_file(file_path) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index a3c4da6e..00928702 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,6 +1,8 @@ +import os + import pytest -from tests.vcr.openai_filter import before_record_request, before_record_response +from tests.vcr.vcr_filter import before_record_request, before_record_response @pytest.fixture(scope="session") @@ -15,4 +17,5 @@ def vcr_config(): "X-OpenAI-Client-User-Agent", "User-Agent", ], + "match_on": ["method", "uri", "body"], } diff --git a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml index 2584bb25..62927ea6 100644 --- a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml +++ b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml @@ -44,9 +44,9 @@ interactions: \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine which next command to use, and respond using the - format specified 
above:"}], "temperature": 0, "max_tokens": 2650}' + format specified above:"}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -152,7 +152,7 @@ interactions: ... \u00a347.13 In stock Add to basket Proofs of God: Classical ... \u00a354.21 In stock Add to basket\"\"\" Using the above text, answer the following question: \"What is the price of the book?\" -- if the question cannot be answered using - the text, summarize the text."}], "temperature": 0, "max_tokens": null}' + the text, summarize the text."}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -227,7 +227,7 @@ interactions: price of the book \"Meditations\" is \u00a325.89.\"\"\" Using the above text, answer the following question: \"What is the price of the book?\" -- if the question cannot be answered using the text, summarize the text."}], "temperature": - 0, "max_tokens": null}' + 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -305,7 +305,7 @@ interactions: Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": null}' + "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -420,7 +420,7 @@ interactions: \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nI was created and nothing new has happened."}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, @@ -443,9 +443,8 @@ 
interactions: \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, - {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 2201}' + )"}, {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -529,7 +528,7 @@ interactions: 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these events from your past: \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing - new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": null}' + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -644,7 +643,7 @@ interactions: \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 01 00:00:00 2000"}, {"role": + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nAs a system, I recall that nothing new has happened since my creation."}, {"role": "user", "content": "Determine which next command to use, and respond using the @@ -667,8 +666,7 @@ interactions: \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', \\'' 
(http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, - {"role": "user", "content": "Determine which next command to use, and respond + )"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I will write the price of the book to a file named ''browse_website.txt''.\",\n \"reasoning\": \"The task requires me to write the price of the book to a file. The write_to_file @@ -684,7 +682,7 @@ interactions: {"role": "system", "content": "Command write_to_file returned: File written to successfully."}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "temperature": 0, - "max_tokens": 1928}' + "max_tokens": 0}' headers: Accept: - '*/*' @@ -758,4 +756,582 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. 
Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: + List Files in Directory, args: \"directory\": \"\"\n4. read_file: + Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: + Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3786' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6STTW/TQBCG7/yK0Vx6cb4VSH1BqgARqTkgUSqBUbRej+1t7B2zOyZBUf57tXHS + ktAiPq4zs+/7vGPPFk2GMepSia6bqvfq6jMVN++au7vFaPPh6vrTYla8zXm1KKc3bzBCTu9Iy+FF + X3PdVCSGLUaoHSmhDOPRy9n48nIyHI8jrDmjCmMsGulN+tOetC7l3nAyHGGErVcFYbzFxnHdyFJ4 + RdZjPJsOI3zUfqiPJ6MIhUVVD6XRcDbaRahLNpo8xl+2WJM/6jquCGNU3hsvykqgZCtkQ4JtYgEA + EpSS26IUn2AMh+KhQRsJxQTnYIkyEIbWE0hJkDpee1quKfVGCDTXtbL7ie/GG4FSpIkHg5R55fvC + XjvVUFjYQCtRFRctDWrKjKgQ0S8nk4GxGW36pdQVBKncBL2SoHFGE3De+TKv+glGP3M6Up6tscUR + Viv7x6Bh5tgMDU/K6RJydk+bQ/oDjPUNaTG22Jfff1xcg+aMzsGaStmOqQc3f8+TJLYH887r1CiM + P7+f/btbF+Qf+8KgIDcVgVU1ZXBxCtKXjVyc4WtnxGjj6/OfICXQylHeVrAuyT67jzBL1rcugCiB + +SO0ZudCrD3c+d58Q2p1NF2bqvrnr/nbn6iz3EXHQzio/XIHYWEdzan9GbVyxfkNdY3WVd3z/zqK + E7dO+FtLPkx26rdhxcY/Hff1Me4+8iF5Yne4izA31vhy2Z0RxuiFG4xwb43xcPd19+IeAAD//wMA + V6vswigFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0cb0782d4f944a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - 
keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 02 May 2023 02:03:56 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '13321' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.337s + x-request-id: + - 63bb414ee1d800b5915650650e19e08c + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '599' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOQUvDQBQG7/6K5TtvSpLWtNmjCtKrHgRFynbz2l2b7Fuzr1Qo+e9SqPY6MMOc + EToYOG/FDakvlg/vu/L7eblYtq/rhVvvnlaNbw7pbXx8OUGDt1/k5GrMHA+pJwkcoeFGskIdTNWs + 6radl4taY+COehjskxTz2X0hx3HLRTkvK2gcs90TzBlp5CHJRvhAMcO0tcYtfcMawmL7f1CV1aTh + PAdHGebjjIHyX3PknmBgcw5ZbJTLIUeheLlfq5PN6nqsbOxUZPEh7lWkk/I2K29TokjdDJPGLsSQ + /WYkmznCIAsnaITY0Q9MOX1Od78AAAD//wMAckpATEkBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0cb0f51a8a944a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 02 May 2023 02:04:03 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '960' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - b05b40f91f03090235ef212bd7352fd7 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are 
browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: + List Files in Directory, args: \"directory\": \"\"\n4. read_file: + Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: + Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. 
message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website + command to visit the 
website and search for the price of the book by inspecting + the HTML code.\",\n \"plan\": \"- Use the browse_website command to visit + the website\\n- Inspect the HTML code to find the price of the book\\n- Write + the price to a file named ''browse_website.txt''\",\n \"criticism\": + \"I need to be careful when inspecting the HTML code to ensure that I find the + correct price.\",\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\n },\n \"command\": + {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5735' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7xTS27bMBDd9xTEbLyhDSmO60jLBgWaRdAicNBPVAgUNZZYSxyVHMMuDJ0mN+nJ + Cn0cN063zUrgPOp9Bo8HMDnEoEvFum6q6fLdt3V4fRN+Xa7uPt9+8e9/Bh/y6OP99Wr56Q4kUPYD + NY9/zDTVTYVsyIIE7VAx5hCHb68uomgeXM4l1JRjBTEUDU/ns8WUty6jaTAPQpCw9apAiA/QOKob + 
Tpk2aD3E4Ty4lHAiPwHRUgITq+o0WgRhK0GXZDR6iB8OUKM/EjuqEGJQ3hvPynJnkyyj7SIcEiuE + EAlwSduiZJ9ALMbhCOCeu2ECN8Ii5oJJ7JxhFFyiaJzRKGjdHzKiTQcrsTYVCqtqzMUkc7TzmO4w + 84ZxxnuezBKQf2s4VJ6sscVRSCsrtn6Q6MVSprQn1VTXyv4XF02l7GBgKu5fW1w7w0YbX5/vGq3f + uk5Jca/Q8xovxqoJY/uxJudQs8hN9yH361zAN6g2R/KdqapXWfDgoJXHmo3sL1rWkQzmnrk5y6Bc + cV7QAejuniheWnnGc97r1T8DTm4xN6y6t+cn3cJ/P14sZlfRU6g+2JgvsS20EtbGGl+mQ50hBs/U + gARjc9xDHLTf2zd/AAAA//8DAOu3m7FtBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0cb0fbc9c9944a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 02 May 2023 02:04:15 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '11284' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.336s + x-request-id: + - ee40eef17c77173b0ccd5fd28eb72e65 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '713' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPwUrDQBRF937FcNeTkjamtbNTUVCXFYWIlOnktRmbzBszr9Ra8u9SUbo9cA/n + HuFrGLjGiutim81uqvX9oqq+y+3d9On2+XLx8vo5q/I9PcocGrz6ICd/i5HjLrYkngM0XE9WqIYZ + T68m83mRlzONjmtqYbCJkhWjMpNdv+IsL/IxNHbJbgjmiNhzF2UpvKWQYMaTXOPsPvNSQ1hseyZF + OWi4hr2jBPN2REfp39pzSzCwKfkkNsipkYNQOPVfJ2VVOiShTqsH1ZOzbauksaICS+PDRgXaq8Ym + 1dgYKVCtkg+OVHdQv1c9hxEGjbUPPjXLnmziAIMkHKHhQ01fMPnwPlz8AAAA//8DAGLYAldmAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0cb15249d6944a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 02 May 2023 02:04:18 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1076' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89839' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 107ms + x-request-id: + - 
3534821a832571f79f963fe548953633 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: + List Files in Directory, args: \"directory\": \"\"\n4. read_file: + Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: + Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. 
get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nAs + a system, I recall that nothing new has happened since my creation."}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to use the browse_website command to visit 
http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website + command to visit the website and search for the price of the book by inspecting + the HTML code.\",\n \"plan\": \"- Use the browse_website command to visit + the website\\n- Inspect the HTML code to find the price of the book\\n- Write + the price to a file named ''browse_website.txt''\",\n \"criticism\": + \"I need to be careful when inspecting the HTML code to ensure that I find the + correct price.\",\n \"speak\": \"I will use the browse_website command + to visit the website and find the price of the book.\"\n },\n \"command\": + {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": + \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to write the price of the book to a file named + ''browse_website.txt''.\",\n \"reasoning\": \"I can use the write_to_file + command to write the price of the book to a file named ''browse_website.txt''.\",\n \"plan\": + \"- Use the write_to_file command to write the price of the book to a file named + ''browse_website.txt''.\",\n \"criticism\": \"I need to ensure that the + file is created in the correct directory.\",\n \"speak\": \"I will use + the write_to_file command to write the price of the book to a file named ''browse_website.txt''.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"browse_website.txt\",\n \"text\": \"The price of the book ''Meditations'' + is \u00a325.89.\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6857' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6SS0WvbMBDG3/dXiHtWgjM3TaLHllG6wmClpbB6BFU+W1pkyUjnpSP4fx+KnabN + 
Hla2R326+77f6bQDU4IApSWpprWTxcW36uru4Xbx+SH7epP51a2eX93ffKoWl+UlcPBPP1DR2DFV + vmktkvEOOKiAkrAEMTtfflyt8my+5ND4Ei0IqFua5NP5hLrw5CdZns2AQxdljSB20AbftLQmv0EX + QczmZysOR/PjRX7GgTxJe5TOl3nPQWlvFEYQjztoMB6Mg7cIAmSMJpJ0lDC9I3RphF3hGGOsANK+ + qzXFAgQbxfECnymJBVwzLX8iG5mwZKSRkYybKbtmDpPgWRfxRV8fSlNPI92+IOqOWOm3bloAfx0U + UEbvjKv/ksb2RhoDMhmQOc+qLqQzkyq9VEwpJDeYsJR0/4HUWukGmgm7/2cXFQwZZWIzWH3xDk8q + Yoty856xt8Za5vz2Tdjg1PPDKkemPzbpZINDyJsZTlhkqE8/wev9DAZ3CejIGDulMMaqs/bXC9Ae + amQrXA89h8o4E/V6cAIBkXwLHIwr8RlE1n/vP/wGAAD//wMAY/RLTo0DAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c0cb15989dc944a-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 02 May 2023 02:04:27 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '8172' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86500' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.333s + x-request-id: + - 7729a95d875601d045b688125b262d58 + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml index 0e9cab5a..1c6e24d8 100644 --- a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -15,33 +15,33 @@ interactions: Exclusively use the commands listed in double quotes e.g. 
\"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. - read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search - Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete - GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: - \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", - \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. - message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. - start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "user", "content": "Determine - which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2738}' + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. 
Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -50,7 +50,7 @@ interactions: Connection: - keep-alive Content-Length: - - '3401' + - '3410' Content-Type: - application/json method: POST @@ -58,20 +58,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7yTT2/bMAzF7/sUBC+5KEHSLG3q21a0a6/DiqGoh0C1GVuLLHoSDRsL/N0H2/nT - ethp2K581Hs/StQeTYoRJrmWpCjt9OpDfdd8+ni7+Hz3ePOzWa+emqf77Cb1tdzWqJBfvlMihxOz - hIvSkhh2qDDxpIVSjBaX64v1+8Xqaqmw4JQsRpiVMl3OVlOp/AtP58v5AhVWQWeE0R5Lz0UpG+Ed - uYDR1epa4dn7VL+YLxQKi7an0vXlvFWY5GwSChg977GgcLT1bAkj1CGYINpJB8lOyHUD7GMHABCj - 5FxluYQYIzgUDwI10hVjfABHlIIwVIFAcoLaG6GN8GZrLEHCRaFd39ALMLknaxm+srfpBIwTBg19 - q9MFpTDJO31Td/pMGpnMYlSvsz3pwM64bAD4khOIDjvw9KMyngIU9BdpCnraP85hQi8WHAR0WXou - vdFCsGUPkneqDrsxcmm1G2in8PifrinxRkxiQjF+J3Kh8h2DFng4vVrC3lMi54jhHqiRM5Nw39q1 - jNNCSXp3TKqNtf9yHYbgVh3X9GD625Z2DgPTG4gRuvbZeMEHoes9W4w43piMP8WrmY60PfEBPHYt - tgq3xpmQb4Z9xgiDcIkKjUupwWjefmvf/QIAAP//AwCaXwR3hAQAAA== + H4sIAAAAAAAAA7ySQY/aQAyF7/0Vli+5DAgWFmiO7aU9VlWLVk2FZieGDEzGsxlHUKH89yokLGwq + 9dZe/eznzzPvjDbHFE2hxZTBjZYffL3e0+HxhfmjfFnPwvdCvxRh//TEOSrk5z0Z6SfGhsvgSCx7 + 
VGgq0kI5ptPFajaZL1cPE4Ul5+QwxV2Q0Wz8OJK6eubRZDaZosI66h1hesZQcRlkI3wgHzFdLh4U + 3rxf69PVXKGwaPdaej9fNApNwdZQxPTHGUuKV9uKHWGKOkYbRXtpIdkL+faAc+YBADKUgutdITHD + FPpiL9BJ2mKGn8ET5SAMdSSQguBYWaGN8GZrHYHhstT+0nARIPlEzjGsuXJ5AtYLg4ZLq9cl5ZAU + rb45tvpYTpKMM1T3uyvSkb31uw7gq/WG4IbRbdEQAxm7tQZa1vs96i+UNl7EkqOADqHiUFktBOyp + P3FIE5z2HcgIvv2nFzCVFWtsLK9fkLNPBCIRaP8LbIw1RThaKUAKG6FlHHrEQPpwnT9a5/7l/3WL + G3XNVW/6R6xah47pDcQAXVe7YSI7oe29WQw43pgMU3x305X2QtyDZ77BRuHWehuLTRdATDEKB1Ro + fU4nTCfNz+bdbwAAAP//AwAsTU8qNQQAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe3ef73b52fac2-SJC + - 7c11ea4fb82bfade-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -81,7 +81,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:59:46 GMT + - Tue, 02 May 2023 17:17:12 GMT Server: - cloudflare access-control-allow-origin: @@ -93,7 +93,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '13160' + - '11488' openai-version: - '2020-10-01' strict-transport-security: @@ -111,7 +111,7 @@ interactions: x-ratelimit-reset-tokens: - 2.335s x-request-id: - - f665162ae22af897be24f632a031d434 + - 81a8552ed38d0037b7b7c23664f5ae2b status: code: 200 message: OK @@ -119,11 +119,11 @@ interactions: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. + to remember.\n\nYou will receive the current summary and the your latest actions. 
Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": null}' + "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -132,7 +132,7 @@ interactions: Connection: - keep-alive Content-Length: - - '600' + - '599' Content-Type: - application/json method: POST @@ -140,16 +140,16 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA0SOQUsDMRQG7/6K8J2zZdNa3eZmeygiPSqISEmTZze6mxc2r1Qt+9+lUPU6MMOc - EAMsfOvE97mrbu+O69X3lJrdks2D2Sy/5o/rp+fgu9ViAw3evZOXizHx3OeOJHKChh/ICQVYc9NM - m2szbxqNngN1sNhnqWaTeSWHYcdVPasNNA7F7Qn2hDxwn2Ur/EGpwC6Mxn/6H2sIi+v+gKnrUcO3 - HD0V2JcTeiq/zYE7goUrJRZxSc6HnITS+f5eHV1Rl2PlUlCJpY1prxIdVeuKal3OlChMMGq8xRRL - ux3IFU6wKMIZGjEF+oStx9fx6gcAAP//AwCGJ6JPSQEAAA== + H4sIAAAAAAAAA0SOQU/CQBBG7/6K5jtvSUsRcI+KMd6MeNIYsm2HdqGdWbtD0JD+d0OCcn3Je3kn + +BoWVeu06kOXLu75sH9arV+6t9V0/vW6e3jfCpe0fvTNDwyk3FGlF2NSSR86Ui8Mg2ogp1TD5vNl + kc0Wy2Jm0EtNHSyaoGkxuU31MJSSZkWWw+AQXUOwJ4RB+qAblT1xhL2bGlzTV2ygoq77B3mWjwZV + K76iCPtxQk/xrzlIR7BwMfqojvV8KKzE5/vn5OhicjlOHNcJi7aem4TpmLQuJq0LgZjqCUaDrWcf + 281ALgrDIqoEGHiu6Rs2Gz/Hm18AAAD//wMA1rZZUUkBAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe3f5a5fb3fac2-SJC + - 7c11eaa7f982fade-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -159,7 +159,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 07:59:49 GMT + - Tue, 02 May 2023 17:17:15 GMT Server: - cloudflare access-control-allow-origin: @@ -171,7 +171,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj openai-processing-ms: - - '634' + - '1025' openai-version: - '2020-10-01' strict-transport-security: @@ -189,7 +189,7 @@ interactions: x-ratelimit-reset-tokens: - 88ms x-request-id: - - 0bc738f58569b4b3635fe0c8384ebcfb + - dcfcb7a7f05897ff8d7924daba89388c status: code: 
200 message: OK @@ -209,48 +209,48 @@ interactions: Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. - read_file: Read file, args: \"filename\": \"\"\n4. search_files: Search - Files, args: \"directory\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. delete_agent: Delete - GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: - \"url\": \"\"\n8. get_text_summary: Get text summary, args: \"url\": \"\", - \"question\": \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. - message_agent: Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. - start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", - \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: - \"reason\": \"\"\n\nResources:\n1. Internet access for searches and - information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered - Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. - Continuously review and analyze your actions to ensure you are performing to - the best of your abilities.\n2. Constructively self-criticize your big-picture - behavior constantly.\n3. Reflect on past decisions and strategies to refine - your approach.\n4. Every command has a cost, so be smart and efficient. Aim - to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou - should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": - {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": - \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": - \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say - to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": - {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response - can be parsed by Python json.loads"}, {"role": "system", "content": "The current - time and date is Tue Jan 01 00:00:00 2000"}, {"role": "system", "content": "This - reminds you of these events from your past: \nI was created and nothing new - has happened."}, {"role": "user", "content": "Determine which next command to - use, and respond using the format specified above:"}, {"role": "assistant", - "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file - command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\": - \"The task requires me to write ''Hello World'' into a file named ''hello_world.txt'', - and the write_to_file command is the most appropriate for this task.\",\n \"plan\": - \"- Use the write_to_file command to write ''Hello World'' into a file named - ''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I use - the correct file name and text to write to the file.\",\n \"speak\": - \"I will use the write_to_file command to write ''Hello World'' into a file - named ''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\": - \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 2486}' + list_files: 
List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"Since I need to write + a specific text into a file, the write_to_file command is the most appropriate + one to use.\",\n \"plan\": \"- Use the write_to_file command to write + ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": + \"I don''t see any issues with this plan.\",\n \"speak\": \"I will use + the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 0}' headers: Accept: - '*/*' @@ -259,7 +259,7 @@ interactions: Connection: - keep-alive Content-Length: 
- - '4646' + - '4576' Content-Type: - application/json method: POST @@ -267,21 +267,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7SSTW/TQBCG7/yK0VxycaIkJST4VqmijYSEQEWIYhRt1xN7yXp3uztOgiL/d7Sx - TUpJOYC4zuf7zLwHVDmmKEvBsnJ6OL/cXV+9mX2+0+aag7u6vHlvH+7myw/bd2+XmKC9/0aSu46R - tJXTxMoaTFB6Ekw5ppNXi+ni5WS2eJ1gZXPSmGLheHgxmg259vd2OL4YTzDBOoiCMD2g87ZyvGK7 - IRMwnYyn8wRPw0+JxSJBtiz0KTSdzJoEZWmVpIDplwNWFPrB3mrCFEUIKrAwHGVaw2QiwiEzAAAZ - cmnrouSQYQpdsEvQnmMwwyWUYksQaikphHWt9XfYecVMBgY3pLWFT9brfADKsAUBa6UJjKgoh0EZ - 86tdzI94z4MR3JYEhvYMgcmBCsAW6kDAJQGLsFl18ATSVpUweSz4GeurRhkmj+V6EsEaZYpW821X - Bp4eauUpQEVn54BYM/kjjzLFOZ5Y+SeiBI4an1UfCUuCygYG4Zy3zivBBGvrgcuYPYPjtDAtyRA+ - /vNxZKSTKlT9Qw3RsZNMqH3sEwxLcN5uVU4gYCu0yqG96VFob0hTPLskOBKbfsFOaf13X21HNklv - 0K78N3/Gb7Tbfhn/RJTwxVNrP/bL/zB4j3DE6Ggy02CT4FoZFcpVuxtTDGwdJqhMTntMx83X5sUP - AAAA//8DAK0qY5KVBAAA + H4sIAAAAAAAAA7SST2/TQBDF73yK0Vxy2UQxpknwsRKoESoXFIGEq2i7nthL1ruuZ0xSonx35NhO + SrgVcZ0/b35v9x3QZpigKbSYsnLj+a1vytzef1x9e7qp71a/dFR9+sD389Xbp1tUGB5/kJF+Y2JC + WTkSGzwqNDVpoQyTaLaIp+/mi3imsAwZOUwwr2QcT27G0tSPYTyNpxEqbFjnhMkBqzqUlawlbMkz + JtE0ihVexC+NRaxQgmh3KUXvZ0eFpgjWEGPy/YAl8SBcB0eYoGa2LNpLixm8kG8tHFIPAJCiFKHJ + C+EUE+iLfYP20hZT/Bx2IIUWWEKhfxLsaitCHkZ35FyAr6F22QislwAaNtYReF1SBqOi7a93bX8i + exkpWIInykACNEwgBYFo3q57uwQmlKX2p4FzbZiapKheAtakOXjr847yi/WGBsRhOTtvQ9icwK3P + /zO49Zk1+gSu5XLf8pnq2knltO9MjGH1z+9iWpfGctlJLiELfiTARKD9M1jmhhh2VgqQwjK01681 + uCK9HfZ31rnX/VcneVRD2Prxv7LWPnt37Q/5Kyhd59cxfZmEAfcUAG6MIeZN49zz6wN7tnCy0btJ + /RGPCjfWWy7W3W1MkCVUqND6jPaYTI8Pxze/AQAA//8DAOv7y1VhBAAA headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7bfe3f5edbd9fac2-SJC + - 7c11eaaf0abffade-SJC Cache-Control: - no-cache, must-revalidate Connection: @@ -291,7 +290,7 @@ interactions: Content-Type: - application/json Date: - - Sun, 30 Apr 2023 08:00:00 GMT + - Tue, 02 May 2023 17:17:26 GMT Server: - cloudflare access-control-allow-origin: @@ -303,7 +302,7 @@ interactions: openai-organization: - user-adtx4fhfg1qsiyzdoaxciooj 
openai-processing-ms:
   -  - '10898'
   +  - '10848'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
@@ -315,13 +314,13 @@ interactions:
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
   -  - '86482'
   +  - '86485'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
   -  - 2.345s
   +  - 2.342s
      x-request-id:
   -  - f294799d2564196ef4852c84fdc8fb17
   +  - e0a52aa6b569cb67dbbb8e6098e31231
      status:
      code: 200
      message: OK
diff --git a/tests/vcr/vcr_filter.py b/tests/vcr/vcr_filter.py
new file mode 100644
index 00000000..38e4cea6
--- /dev/null
+++ b/tests/vcr/vcr_filter.py
@@ -0,0 +1,73 @@
+import json
+import re
+from typing import Any, Dict, List
+
+REPLACEMENTS: List[Dict[str, str]] = [
+    {
+        "regex": r"\w{3} \w{3} {1,2}\d{1,2} \d{2}:\d{2}:\d{2} \d{4}",
+        "replacement": "Tue Jan 1 00:00:00 2000",
+    },
+    {
+        "regex": r"<selenium.webdriver.chrome.webdriver.WebDriver[^>]*>",
+        "replacement": "",
+    },
+]
+
+
+def replace_message_content(content: str, replacements: List[Dict[str, str]]) -> str:
+    for replacement in replacements:
+        pattern = re.compile(replacement["regex"])
+        content = pattern.sub(replacement["replacement"], content)
+
+    return content
+
+
+def replace_timestamp_in_request(request: Any) -> Any:
+    try:
+        if not request or not request.body:
+            return request
+        body = json.loads(request.body)
+    except ValueError:
+        return request
+
+    if "messages" not in body:
+        return request
+    body[
+        "max_tokens"
+    ] = 0  # this field is inconsistent between requests and not used at the moment.
+ for message in body["messages"]: + if "content" in message and "role" in message: + if message["role"] == "system": + message["content"] = replace_message_content( + message["content"], REPLACEMENTS + ) + + request.body = json.dumps(body) + return request + + +def before_record_response(response: Dict[str, Any]) -> Dict[str, Any]: + if "Transfer-Encoding" in response["headers"]: + del response["headers"]["Transfer-Encoding"] + return response + + +def before_record_request(request: Any) -> Any: + filtered_request = filter_hostnames(request) + filtered_request_without_dynamic_data = replace_timestamp_in_request( + filtered_request + ) + return filtered_request_without_dynamic_data + + +def filter_hostnames(request: Any) -> Any: + allowed_hostnames: List[str] = [ + "api.openai.com", + "localhost:50337", + ] + + # Add your implementation here for filtering hostnames + if any(hostname in request.url for hostname in allowed_hostnames): + return request + else: + return None From 479c7468b46d6991938ad4257f6fab90f9b481cb Mon Sep 17 00:00:00 2001 From: Peter Petermann Date: Wed, 3 May 2023 01:08:15 +0200 Subject: [PATCH 02/56] Fix docker volume mounts (#3710) Co-authored-by: Reinier van der Leer Co-authored-by: Nicholas Tindle --- Dockerfile | 2 ++ autogpt/utils.py | 8 +++++--- data/.keep | 0 docs/setup.md | 10 ++++++++-- tests/test_utils.py | 16 ++++++++-------- 5 files changed, 23 insertions(+), 13 deletions(-) create mode 100644 data/.keep diff --git a/Dockerfile b/Dockerfile index 6023cefa..d32d4a66 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,5 +36,7 @@ RUN sed -i '/Items below this point will not be included in the Docker Image/,$d pip install --no-cache-dir -r requirements.txt WORKDIR /app ONBUILD COPY autogpt/ ./autogpt +ONBUILD COPY scripts/ ./scripts + FROM autogpt-${BUILD_TYPE} AS auto-gpt diff --git a/autogpt/utils.py b/autogpt/utils.py index 112a1508..6a3ef0ae 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -109,10 +109,12 @@ def 
get_current_git_branch() -> str: def get_latest_bulletin() -> tuple[str, bool]: - exists = os.path.exists("CURRENT_BULLETIN.md") + exists = os.path.exists("data/CURRENT_BULLETIN.md") current_bulletin = "" if exists: - current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read() + current_bulletin = open( + "data/CURRENT_BULLETIN.md", "r", encoding="utf-8" + ).read() new_bulletin = get_bulletin_from_web() is_new_news = new_bulletin != "" and new_bulletin != current_bulletin @@ -125,7 +127,7 @@ def get_latest_bulletin() -> tuple[str, bool]: ) if new_bulletin and is_new_news: - open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) + open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}" return f"{news_header}\n{current_bulletin}", is_new_news diff --git a/data/.keep b/data/.keep new file mode 100644 index 00000000..e69de29b diff --git a/docs/setup.md b/docs/setup.md index d0917c8b..c4974914 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -54,9 +54,15 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt environment: MEMORY_BACKEND: ${MEMORY_BACKEND:-redis} REDIS_HOST: ${REDIS_HOST:-redis} - volumes: - - ./:/app profiles: ["exclude-from-up"] + volumes: + - ./auto_gpt_workspace:/app/auto_gpt_workspace + - ./data:/app/data + ## allow auto-gpt to write logs to disk + - ./logs:/app/logs + ## uncomment following lines if you have / want to make use of these files + #- ./azure.yaml:/app/azure.yaml + #- ./ai_settings.yaml:/app/ai_settings.yaml redis: image: "redis/redis-stack-server:latest" diff --git a/tests/test_utils.py b/tests/test_utils.py index 5b4d181c..f9ab3698 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -85,8 +85,8 @@ def test_get_bulletin_from_web_exception(mock_get): def test_get_latest_bulletin_no_file(): - if os.path.exists("CURRENT_BULLETIN.md"): - os.remove("CURRENT_BULLETIN.md") + 
if os.path.exists("data/CURRENT_BULLETIN.md"): + os.remove("data/CURRENT_BULLETIN.md") bulletin, is_new = get_latest_bulletin() assert is_new @@ -94,7 +94,7 @@ def test_get_latest_bulletin_no_file(): def test_get_latest_bulletin_with_file(): expected_content = "Test bulletin" - with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: f.write(expected_content) with patch("autogpt.utils.get_bulletin_from_web", return_value=""): @@ -102,11 +102,11 @@ def test_get_latest_bulletin_with_file(): assert expected_content in bulletin assert is_new == False - os.remove("CURRENT_BULLETIN.md") + os.remove("data/CURRENT_BULLETIN.md") def test_get_latest_bulletin_with_new_bulletin(): - with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: f.write("Old bulletin") expected_content = "New bulletin from web" @@ -116,12 +116,12 @@ def test_get_latest_bulletin_with_new_bulletin(): assert expected_content in bulletin assert is_new - os.remove("CURRENT_BULLETIN.md") + os.remove("data/CURRENT_BULLETIN.md") def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin(): expected_content = "Current bulletin" - with open("CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: f.write(expected_content) with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content): @@ -129,7 +129,7 @@ def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin(): assert expected_content in bulletin assert is_new == False - os.remove("CURRENT_BULLETIN.md") + os.remove("data/CURRENT_BULLETIN.md") @skip_in_ci From 26c6cfeefd785498460394256f74220935e2ee6d Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 3 May 2023 00:27:54 -0700 Subject: [PATCH 03/56] Feature/enable intuitive logs for community challenge step 1 (#3695) --- autogpt/agent/agent.py | 31 ++++++++++-- 
autogpt/llm/chat.py | 8 ++++ autogpt/log_cycle/__init__.py | 0 autogpt/log_cycle/json_handler.py | 20 ++++++++ autogpt/log_cycle/log_cycle.py | 80 +++++++++++++++++++++++++++++++ autogpt/logs.py | 35 +++++++++++++- 6 files changed, 169 insertions(+), 5 deletions(-) create mode 100644 autogpt/log_cycle/__init__.py create mode 100644 autogpt/log_cycle/json_handler.py create mode 100644 autogpt/log_cycle/log_cycle.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index dbae1198..1c184aff 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -1,3 +1,5 @@ +from datetime import datetime + from colorama import Fore, Style from autogpt.app import execute_command, get_command @@ -6,6 +8,11 @@ from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message from autogpt.llm.token_counter import count_string_tokens +from autogpt.log_cycle.log_cycle import ( + FULL_MESSAGE_HISTORY_FILE_NAME, + NEXT_ACTION_FILE_NAME, + LogCycleHandler, +) from autogpt.logs import logger, print_assistant_thoughts from autogpt.speech import say_text from autogpt.spinner import Spinner @@ -68,22 +75,33 @@ class Agent: self.system_prompt = system_prompt self.triggering_prompt = triggering_prompt self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace) + self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S") + self.cycle_count = 0 + self.log_cycle_handler = LogCycleHandler() def start_interaction_loop(self): # Interaction Loop cfg = Config() - loop_count = 0 + self.cycle_count = 0 command_name = None arguments = None user_input = "" while True: # Discontinue if continuous limit is reached - loop_count += 1 + self.cycle_count += 1 + self.log_cycle_handler.log_count_within_cycle = 0 + self.log_cycle_handler.log_cycle( + self.config.ai_name, + self.created_at, + 
self.cycle_count, + self.full_message_history, + FULL_MESSAGE_HISTORY_FILE_NAME, + ) if ( cfg.continuous_mode and cfg.continuous_limit > 0 - and loop_count > cfg.continuous_limit + and self.cycle_count > cfg.continuous_limit ): logger.typewriter_log( "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}" @@ -122,6 +140,13 @@ class Agent: except Exception as e: logger.error("Error: \n", str(e)) + self.log_cycle_handler.log_cycle( + self.config.ai_name, + self.created_at, + self.cycle_count, + assistant_reply_json, + NEXT_ACTION_FILE_NAME, + ) if not cfg.continuous_mode and self.next_action_count == 0: # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index b4e6b1a4..c795a3ca 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -8,6 +8,7 @@ from autogpt.llm.api_manager import ApiManager from autogpt.llm.base import Message from autogpt.llm.llm_utils import create_chat_completion from autogpt.llm.token_counter import count_message_tokens +from autogpt.log_cycle.log_cycle import PROMPT_NEXT_ACTION_FILE_NAME from autogpt.logs import logger from autogpt.memory_management.store_memory import ( save_memory_trimmed_from_context_window, @@ -231,6 +232,13 @@ def chat_with_ai( logger.debug(f"{message['role'].capitalize()}: {message['content']}") logger.debug("") logger.debug("----------- END OF CONTEXT ----------------") + agent.log_cycle_handler.log_cycle( + agent.config.ai_name, + agent.created_at, + agent.cycle_count, + current_context, + PROMPT_NEXT_ACTION_FILE_NAME, + ) # TODO: use a model defined elsewhere, so that model can contain # temperature and other settings we care about diff --git a/autogpt/log_cycle/__init__.py b/autogpt/log_cycle/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/autogpt/log_cycle/json_handler.py b/autogpt/log_cycle/json_handler.py new file mode 100644 index 00000000..51ae9ae0 --- /dev/null +++ b/autogpt/log_cycle/json_handler.py @@ -0,0 +1,20 
@@ +import json +import logging + + +class JsonFileHandler(logging.FileHandler): + def __init__(self, filename, mode="a", encoding=None, delay=False): + super().__init__(filename, mode, encoding, delay) + + def emit(self, record): + json_data = json.loads(self.format(record)) + with open(self.baseFilename, "w", encoding="utf-8") as f: + json.dump(json_data, f, ensure_ascii=False, indent=4) + + +import logging + + +class JsonFormatter(logging.Formatter): + def format(self, record): + return record.msg diff --git a/autogpt/log_cycle/log_cycle.py b/autogpt/log_cycle/log_cycle.py new file mode 100644 index 00000000..720ca273 --- /dev/null +++ b/autogpt/log_cycle/log_cycle.py @@ -0,0 +1,80 @@ +import json +import os +from typing import Any, Dict, Union + +from autogpt.logs import logger + +DEFAULT_PREFIX = "agent" +FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json" +PROMPT_NEXT_ACTION_FILE_NAME = "prompt_next_action.json" +NEXT_ACTION_FILE_NAME = "next_action.json" + + +class LogCycleHandler: + """ + A class for logging cycle data. 
+ """ + + def __init__(self): + self.log_count_within_cycle = 0 + + @staticmethod + def create_directory_if_not_exists(directory_path: str) -> None: + if not os.path.exists(directory_path): + os.makedirs(directory_path, exist_ok=True) + + def create_outer_directory(self, ai_name: str, created_at: str) -> str: + log_directory = logger.get_log_directory() + + if os.environ.get("OVERWRITE_DEBUG") == "1": + outer_folder_name = "auto_gpt" + else: + ai_name_short = ai_name[:15] if ai_name else DEFAULT_PREFIX + outer_folder_name = f"{created_at}_{ai_name_short}" + + outer_folder_path = os.path.join(log_directory, "DEBUG", outer_folder_name) + self.create_directory_if_not_exists(outer_folder_path) + + return outer_folder_path + + def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str: + nested_folder_name = str(cycle_count).zfill(3) + nested_folder_path = os.path.join(outer_folder_path, nested_folder_name) + self.create_directory_if_not_exists(nested_folder_path) + + return nested_folder_path + + def create_nested_directory( + self, ai_name: str, created_at: str, cycle_count: int + ) -> str: + outer_folder_path = self.create_outer_directory(ai_name, created_at) + nested_folder_path = self.create_inner_directory(outer_folder_path, cycle_count) + + return nested_folder_path + + def log_cycle( + self, + ai_name: str, + created_at: str, + cycle_count: int, + data: Union[Dict[str, Any], Any], + file_name: str, + ) -> None: + """ + Log cycle data to a JSON file. + + Args: + data (Any): The data to be logged. + file_name (str): The name of the file to save the logged data. 
+ """ + nested_folder_path = self.create_nested_directory( + ai_name, created_at, cycle_count + ) + + json_data = json.dumps(data, ensure_ascii=False, indent=4) + log_file_path = os.path.join( + nested_folder_path, f"{self.log_count_within_cycle}_{file_name}" + ) + + logger.log_json(json_data, log_file_path) + self.log_count_within_cycle += 1 diff --git a/autogpt/logs.py b/autogpt/logs.py index 1cbb784d..120db39d 100644 --- a/autogpt/logs.py +++ b/autogpt/logs.py @@ -5,9 +5,11 @@ import random import re import time from logging import LogRecord +from typing import Any from colorama import Fore, Style +from autogpt.log_cycle.json_handler import JsonFileHandler, JsonFormatter from autogpt.singleton import Singleton from autogpt.speech import say_text @@ -74,6 +76,11 @@ class Logger(metaclass=Singleton): self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) + self.json_logger = logging.getLogger("JSON_LOGGER") + self.json_logger.addHandler(self.file_handler) + self.json_logger.addHandler(error_handler) + self.json_logger.setLevel(logging.DEBUG) + self.speak_mode = False self.chat_plugins = [] @@ -152,6 +159,26 @@ class Logger(metaclass=Singleton): self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText) + def log_json(self, data: Any, file_name: str) -> None: + # Define log directory + this_files_dir_path = os.path.dirname(__file__) + log_dir = os.path.join(this_files_dir_path, "../logs") + + # Create a handler for JSON files + json_file_path = os.path.join(log_dir, file_name) + json_data_handler = JsonFileHandler(json_file_path) + json_data_handler.setFormatter(JsonFormatter()) + + # Log the JSON data using the custom file handler + self.json_logger.addHandler(json_data_handler) + self.json_logger.debug(data) + self.json_logger.removeHandler(json_data_handler) + + def get_log_directory(self): + this_files_dir_path = os.path.dirname(__file__) + log_dir = os.path.join(this_files_dir_path, "../logs") + return 
os.path.abspath(log_dir) + """ Output stream to console using simulated typing @@ -199,12 +226,16 @@ class AutoGptFormatter(logging.Formatter): if hasattr(record, "color"): record.title_color = ( getattr(record, "color") - + getattr(record, "title") + + getattr(record, "title", "") + " " + Style.RESET_ALL ) else: - record.title_color = getattr(record, "title") + record.title_color = getattr(record, "title", "") + + # Add this line to set 'title' to an empty string if it doesn't exist + record.title = getattr(record, "title", "") + if hasattr(record, "msg"): record.message_no_color = remove_color_codes(getattr(record, "msg")) else: From e21917cc939264fe16cdff6f5668738332477658 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Wed, 3 May 2023 09:32:03 -0700 Subject: [PATCH 04/56] Feature/enable intuitive logs summarization (#3697) --- autogpt/llm/chat.py | 17 ++++++++--------- autogpt/log_cycle/log_cycle.py | 4 +++- autogpt/memory_management/summary_memory.py | 19 ++++++++++++++++++- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index c795a3ca..8f7f7d50 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -8,15 +8,8 @@ from autogpt.llm.api_manager import ApiManager from autogpt.llm.base import Message from autogpt.llm.llm_utils import create_chat_completion from autogpt.llm.token_counter import count_message_tokens -from autogpt.log_cycle.log_cycle import PROMPT_NEXT_ACTION_FILE_NAME +from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME from autogpt.logs import logger -from autogpt.memory_management.store_memory import ( - save_memory_trimmed_from_context_window, -) -from autogpt.memory_management.summary_memory import ( - get_newly_trimmed_messages, - update_running_summary, -) cfg = Config() @@ -153,6 +146,10 @@ def chat_with_ai( # Move to the next most recent message in the full message history next_message_to_add_index -= 1 + from autogpt.memory_management.summary_memory import ( + 
get_newly_trimmed_messages, + update_running_summary, + ) # Insert Memories if len(full_message_history) > 0: @@ -164,7 +161,9 @@ def chat_with_ai( current_context=current_context, last_memory_index=agent.last_memory_index, ) + agent.summary_memory = update_running_summary( + agent, current_memory=agent.summary_memory, new_events=newly_trimmed_messages, ) @@ -237,7 +236,7 @@ def chat_with_ai( agent.created_at, agent.cycle_count, current_context, - PROMPT_NEXT_ACTION_FILE_NAME, + CURRENT_CONTEXT_FILE_NAME, ) # TODO: use a model defined elsewhere, so that model can contain diff --git a/autogpt/log_cycle/log_cycle.py b/autogpt/log_cycle/log_cycle.py index 720ca273..5f2732a8 100644 --- a/autogpt/log_cycle/log_cycle.py +++ b/autogpt/log_cycle/log_cycle.py @@ -6,8 +6,10 @@ from autogpt.logs import logger DEFAULT_PREFIX = "agent" FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json" -PROMPT_NEXT_ACTION_FILE_NAME = "prompt_next_action.json" +CURRENT_CONTEXT_FILE_NAME = "current_context.json" NEXT_ACTION_FILE_NAME = "next_action.json" +PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json" +SUMMARY_FILE_NAME = "summary.txt" class LogCycleHandler: diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py index 754c09ba..55ff3853 100644 --- a/autogpt/memory_management/summary_memory.py +++ b/autogpt/memory_management/summary_memory.py @@ -2,8 +2,10 @@ import copy import json from typing import Dict, List, Tuple +from autogpt.agent import Agent from autogpt.config import Config from autogpt.llm.llm_utils import create_chat_completion +from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME cfg = Config() @@ -46,7 +48,7 @@ def get_newly_trimmed_messages( def update_running_summary( - current_memory: str, new_events: List[Dict[str, str]] + agent: Agent, current_memory: str, new_events: List[Dict[str, str]] ) -> str: """ This function takes a list of dictionaries representing new events and combines them 
with the current summary, @@ -110,9 +112,24 @@ Latest Development: "content": prompt, } ] + agent.log_cycle_handler.log_cycle( + agent.config.ai_name, + agent.created_at, + agent.cycle_count, + messages, + PROMPT_SUMMARY_FILE_NAME, + ) current_memory = create_chat_completion(messages, cfg.fast_llm_model) + agent.log_cycle_handler.log_cycle( + agent.config.ai_name, + agent.created_at, + agent.cycle_count, + current_memory, + SUMMARY_FILE_NAME, + ) + message_to_return = { "role": "system", "content": f"This reminds you of these events from your past: \n{current_memory}", From d74428057eb4af3da18b28203343be779464d174 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 3 May 2023 18:40:49 -0500 Subject: [PATCH 05/56] Move task_complete command out of prompt (#3663) * feat: move task_complete command out of prompt * fix: formatting fixes * Add the shutdown command to the test agents * tests: update test vcrs --------- Co-authored-by: James Collins --- autogpt/app.py | 8 - autogpt/commands/task_statuses.py | 26 + autogpt/main.py | 1 + autogpt/prompts/prompt.py | 9 - tests/integration/agent_factory.py | 3 + .../test_memory_challenge_a.yaml | 721 +++++++++++++++++- .../test_browse_website.yaml | 423 ++++++++++ .../test_write_file/test_write_file.yaml | 249 ++++++ .../goal_oriented/test_browse_website.py | 2 +- 9 files changed, 1415 insertions(+), 27 deletions(-) create mode 100644 autogpt/commands/task_statuses.py diff --git a/autogpt/app.py b/autogpt/app.py index 150bfca3..2fb13135 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -120,8 +120,6 @@ def execute_command( # TODO: Change these to take in a file rather than pasted code, if # non-file is given, return instructions "Input should be a python # filepath, write your code to file and try again - elif command_name == "task_complete": - shutdown() else: for command in prompt.commands: if ( @@ -171,12 +169,6 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]: return scrape_links(url) -def shutdown() -> 
NoReturn: - """Shut down the program""" - logger.info("Shutting down...") - quit() - - @command( "start_agent", "Start GPT Agent", diff --git a/autogpt/commands/task_statuses.py b/autogpt/commands/task_statuses.py new file mode 100644 index 00000000..46c5b6c0 --- /dev/null +++ b/autogpt/commands/task_statuses.py @@ -0,0 +1,26 @@ +"""Task Statuses module.""" +from __future__ import annotations + +from typing import NoReturn + +from autogpt.commands.command import command +from autogpt.logs import logger + + +@command( + "task_complete", + "Task Complete (Shutdown)", + '"reason": ""', +) +def task_complete(reason: str) -> NoReturn: + """ + A function that takes in a string and exits the program + + Parameters: + reason (str): The reason for shutting down. + Returns: + A result string from create chat completion. A list of suggestions to + improve the code. + """ + logger.info(title="Shutting down...\n", message=reason) + quit() diff --git a/autogpt/main.py b/autogpt/main.py index 355e1085..0a6b2379 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -130,6 +130,7 @@ def run_auto_gpt( command_registry.import_commands("autogpt.commands.web_selenium") command_registry.import_commands("autogpt.commands.write_tests") command_registry.import_commands("autogpt.app") + command_registry.import_commands("autogpt.commands.task_statuses") ai_name = "" ai_config = construct_main_ai_config() diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index 7a53f603..5da01c44 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -41,15 +41,6 @@ def build_default_prompt_generator() -> PromptGenerator: 'Exclusively use the commands listed in double quotes e.g. 
"command name"' ) - # Define the command list - commands = [ - ("Task Complete (Shutdown)", "task_complete", {"reason": ""}), - ] - - # Add commands to the PromptGenerator object - for command_label, command_name, args in commands: - prompt_generator.add_command(command_label, command_name, args) - # Add resources to the PromptGenerator object prompt_generator.add_resource( "Internet access for searches and information gathering." diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py index 8cb622ec..31dcae0b 100644 --- a/tests/integration/agent_factory.py +++ b/tests/integration/agent_factory.py @@ -45,6 +45,7 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace command_registry.import_commands("autogpt.commands.file_operations") command_registry.import_commands("autogpt.commands.web_selenium") command_registry.import_commands("autogpt.app") + command_registry.import_commands("autogpt.commands.task_statuses") ai_config = AIConfig( ai_name="browse_website-GPT", @@ -80,6 +81,7 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace) command_registry = CommandRegistry() command_registry.import_commands("autogpt.commands.file_operations") command_registry.import_commands("autogpt.app") + command_registry.import_commands("autogpt.commands.task_statuses") ai_config = AIConfig( ai_name="write_to_file-GPT", @@ -120,6 +122,7 @@ def memory_management_agent( command_registry = CommandRegistry() command_registry.import_commands("autogpt.commands.file_operations") command_registry.import_commands("autogpt.app") + command_registry.import_commands("autogpt.commands.task_statuses") ai_config = AIConfig( ai_name="Follow-Instructions-GPT", diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml index 8b0ee7dc..5d8791be 100644 --- 
a/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_a/test_memory_challenge_a.yaml @@ -40,7 +40,7 @@ interactions: can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2763}' + "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -123,7 +123,7 @@ interactions: Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": null}' + "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -248,7 +248,7 @@ interactions: {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "temperature": - 0, "max_tokens": 2549}' + 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -332,7 +332,7 @@ interactions: 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these events from your past: \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing - new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": null}' + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -469,7 +469,7 @@ interactions: {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, {"role": "user", 
"content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 2339}' + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -554,7 +554,7 @@ interactions: ''system'', ''content'': ''This reminds you of these events from your past: \\nAs a system, I recall that nothing new has happened since my creation.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], "temperature": 0, - "max_tokens": null}' + "max_tokens": 0}' headers: Accept: - '*/*' @@ -704,7 +704,7 @@ interactions: {"role": "system", "content": "Command read_file returned: Write the task_id into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], - "temperature": 0, "max_tokens": 2125}' + "temperature": 0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -789,7 +789,7 @@ interactions: ''system'', ''content'': ''This reminds you of these events from your past: \\nAs a system, I recall that nothing new has happened since my creation.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], "temperature": 0, - "max_tokens": null}' + "max_tokens": 0}' headers: Accept: - '*/*' @@ -951,7 +951,7 @@ interactions: {"role": "system", "content": "Command write_to_file returned: File written to successfully."}, {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "temperature": 0, - "max_tokens": 1912}' + "max_tokens": 0}' headers: Accept: - '*/*' @@ -1025,4 +1025,707 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made 
independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. 
Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3299' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4RSS2/TQBC+8ytGc7ajBJMm+EZvBYlLWxDCVbTZHdtL7F2zM1YTIv935NhOUVKV + 6zeP7zFzRGswRV0q0XVTxavbw/eHj7xafrNG3jc/5Mvq6+r+5vPtff77ESP021+kZZyYaV83FYn1 + DiPUgZSQwXRxs07m6/XNIomw9oYqTLFoJE5my1jasPXxPJkvMMKWVUGYHrEJvm5kI35HjjFdJUmE + L7vP+GL5IULxoqoztF6vugh16a0mxvTnEWviaW3wFWGKitmyKCe9SO+EXG/gmDkAgAyl9G1RCmeY + wgiOBdpLD2Z4B1z6tjLAooLA9gCBlLGuACkJrGMJre6l8mYxk71AbiuClqeOvntzwrSva+XMLMPo + X6pAir2zrhj4HkqC3AYWYKEGLIN4aJ2h0PswV6wwgaJ4xyClEnBEph/bEjQUch9quqJtKuUGxhge + mV7X2i/pwbe8ZpmL4ZNT1eEPva7OkFCorRvKjvaDN75QpIMVqy3XU/CTDXLcBhqs3b2Zxfg4VB1g + S7kPBE3wmuh0r2crJSh3GJK6zIMbUruJ+dlW1f8PfrI/y3BY00XTV43hXT2VUzUNDOegL0SoUFz+ + 4lDoe1/Gry8xiTgJGfVkrsMuwtw6y+VmeDNMkcU3GKF1hvaYzrun7t1fAAAA//8DAHJkh+YKBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15ce3c5a414768-DFW + Cache-Control: + - no-cache, must-revalidate + 
Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:37:03 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '9193' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.337s + x-request-id: + - a04fa61855d48102644e1caa13d320c7 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. 
list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file using the read_file command.\",\n \"reasoning\": + \"The first step is to understand the instructions and the tasks that need to + be performed.\",\n \"plan\": \"- Use the read_file command to read the + instructions_1.txt file\\n- Analyze the instructions and determine the next + steps\",\n \"criticism\": \"I need to ensure that I understand the instructions + completely before proceeding with any tasks.\",\n \"speak\": \"I will + start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: This task_id is 2314\nRead + the file instructions_2.txt"}, {"role": "user", "content": "Determine which + next command to use, and respond using the format specified above:"}], "temperature": + 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4443' 
+ Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTW/aQBCG7/0VoznbCEKhxLcmUqRekqpqFal1hJb12F5Yz1q7YwFF/PfK2ITU + VG2v8/HOMzPvAU2GCepSia5qG3+42z/n9+vN93z18KXyT8+P66d7+vxw937z6DBCt1qTlr5jpF1V + WxLjGCPUnpRQhslkvpiOF4v5zSzCymVkMcGilng6msXS+JWLx9PxBCNsgioIkwPW3lW1LMVtiAMm + t/NphBft1/hkNo9QnCh7CU0mt8cIdemMpoDJjwNWFM663lnCBFUIJohiaSkdC3G7wSFlAIAUpXRN + UUpIMYE+2CdoJ20wxU/ARBmIA08qAykJDAfxjW4Bw/JmJDuB3FiCJhguThVt6fIU066qFGejFKO3 + +p5UcGy46IZ8/Ytqi60Mh5Mw004gkIDLf2sAKZXAhTV31rrtcGptFXcDY/gW6M+o/7NqmnIMH1nZ + /U+6qoNWJCMhXxmmN9hCdRgQaW/EaBOq4bGJQ+PpvFbDGfn2j9dU0LuF7B5WlDtPUHunibL2GVsj + JSjeg6iwCcN7hJrU5jx5a6z994/5dNVO5BidfdSf7spGrCrq9F/PPEBQvhi6r0u0tZf2a5wzxAmk + 50n5iMcIc8MmlMvOY5hgEFdjhIYz2mEyPr4c3/0CAAD//wMAoEBxWf0DAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15ce845a364768-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:37:15 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '9780' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86466' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.356s + x-request-id: + - a13e9d3f6199be49b0b0e945e7d7b1ce + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and 
follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. 
Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file 
instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5449' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTY/TQAyG7/wKy+ekagnbrXIDDmgFAg5dgURQNZ04ydCJJ51xaEvV/47SpHRJ + EXD1x+vH9ntEk2OKulKi68bG968On/zj9v1WvW6Lx3eHZfF2+2b3+cP8+8eHJUbo1t9Iy9Ax0a5u + LIlxjBFqT0oox3Q2XyTTxWKe3EdYu5wsplg2EieTu1hav3bxNJnOMMI2qJIwPWLjXd3IStyGOGA6 + m80XEV7Fr4m7eYTiRNlrKHn+4hShrpzRFDD9csSawkXYO0uYogrBBFEsHaZjIe5WOGYMAJChVK4t + KwkZpjAEhwTtpQtm+ABMlIM48KRykIrAcBDf6g4wrJKJ7AUKYwnaYLg8V3Slq3NMu7pWnE8yjJ7q + e1LBseGyH7L8i2qHrQyHszDTXiCQgCt+awCplMCVtXDWut14amMV9wNjeAz0Z9T/WTXLOIaXrOzh + B93UQSeSk5CvDdMTbKEmjIi0N2K0CfX42MSh9XRZq+WcfPfHWyoY3EL2AGsqnCdovNNEefeMnZEK + 
FB9AVNiE8T1CQ2pzmbwz1v77x3y+ai9yii4+Gk53YyNWNfX6v848QlC+HLuvT3S11/ZbnAvEGWTg + yfiEpwgLwyZUq95jmGIQ12CEhnPaYzo9fT09+wkAAP//AwBwW9n7/gMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15cecdea334768-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:37:26 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '9295' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86459' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.36s + x-request-id: + - ecfa6ef54add3f6517ceb72c53d46f7d + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. 
Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file 
command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6448' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions 
+ response: + body: + string: !!binary | + H4sIAAAAAAAAA5RSy27bMBC89yuIPdOCFTmOq2MOAQKjD/QBBKgKgaE2EmuJK3BXtQtD/17IsqFE + RQ++znBnZpdzBFdACrYyYpu2Xtzd/3m6eTIfGLe/P9Zfw2f88n5rHg6ftpt9AA30/AutnCciS01b + ozjyoMEGNIIFpPF6kyw3m/Vqo6GhAmtIoWxlkUS3C+nCMy2WyTIGDR2bEiE9QhuoaSUX2qFnSOPk + LtYwiU/EOtEgJKaeoNtk1WuwFTmLDOmPIzTIF+FANUIKhtmxGC9DTPKCfljhmHmllMpAKurKSjiD + VJ3BM4EHGcAMHpVHLJSQ2gcnqKRCJYZ3uSuU80IngDppO4nkIOrF1ag6dr48MaehXCg/4Zaaxvgi + ykC/dgtomLzz5Wj5rULlPEvo7HAEzpNJuTKsDO+wUA1eF2pu2tbGj34L9Z3x/2mvs5m52ODEWcfN + /JrouQuDpBH1+ErfUgho5ep1uEWzu5jsXV1f81+e9lEGo1yvL+047/9PObxpcHR6c69ZIBPKea9G + Yng7SUxR3szPa3iTxKtLwlPKc9jM99BreHHecZWPRYIUWKgFDc4XeIB02f/s3/0FAAD//wMAl/Cn + d/EDAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15cf155f914768-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:37:38 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '9744' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86457' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.362s + x-request-id: + - 9a91f5f1047315a3b543a4b85d42e4d3 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. 
Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. 
Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nAs a system, I recall that nothing + new has happened since my creation."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I should start by reading the instructions_1.txt file using the read_file + command.\",\n \"reasoning\": \"The first step is to understand the instructions + and the tasks that need to be performed.\",\n \"plan\": \"- Use the read_file + command to read the instructions_1.txt file\\n- Analyze the instructions and + determine the next steps\",\n \"criticism\": \"I need to ensure that + I understand the instructions completely before proceeding with any tasks.\",\n \"speak\": + \"I will start by reading the instructions file.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", 
"content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_2.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_2.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_2.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_2.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to read the instructions_3.txt file using the + read_file command.\",\n \"reasoning\": \"The instructions_3.txt file + contains the next set of instructions that I need to follow.\",\n \"plan\": + \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze + the instructions and determine the next steps\",\n \"criticism\": \"I + need to ensure that I understand the instructions completely before proceeding + with any tasks.\",\n \"speak\": \"I will read the instructions_3.txt + file now.\"\n },\n \"command\": {\n \"name\": \"read_file\",\n \"args\": + {\n \"filename\": \"instructions_3.txt\"\n }\n }\n}"}, + {"role": "system", "content": "Command read_file returned: Write the task_id + into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to write the task_id into the output.txt file using the write_to_file + 
command.\",\n \"reasoning\": \"The instructions_3.txt file has asked + me to write the task_id into the output.txt file.\",\n \"plan\": \"- + Use the write_to_file command to write the task_id into the output.txt file\",\n \"criticism\": + \"I need to ensure that I write the correct task_id into the output.txt file.\",\n \"speak\": + \"I will write the task_id into the output.txt file now.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"output.txt\",\n \"text\": \"2314\"\n }\n }\n}"}, {"role": + "system", "content": "Command write_to_file returned: File written to successfully."}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '7415' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA3SRTW+bQBCG7/0VqzmvLZAT4nJLpRwi5UM9RKpUKmu9jGHrZZYwQ53I4r9XBGwc + ql7n432feecILocUbGnEVrVf3Hx7/3F39f1Z7m8fVnGMr4/7O3p8fXmqkuctaAjb32hl3FjaUNUe + xQUCDbZBI5hDGifrVbReJ0mkoQo5ekihqGWxWl4vpG22YRGtohg0tGwKhPQIdROqWjYS9kgMaXx9 + k2iYxKdGvNIgQYyfSsn6a6fBlsFZZEh/HqFCPgk3wSOkYJgdiyHpMQMJUn/CMSOllMpAytAWpXAG + qRqLYwPfpC9mcK9K8wfVyIS5Mt4rKVGJ4T0rQ7kixFxJUFy2kocDqZYdFeehzWm3F6kM5csM9KVZ + g4YDOSoGx9vRwBFL09o+Bx4gtoikdsH7cOhBKD97KMdnxLl87Q0Nygv1wvh/rssbZhq2ceKs42oQ + egqEswmu0exPkR2c91McFA7LDIbhTp+yH03/iZ5MhYPOJ8iZnWmK+dcuw7xI8uNN0/u4tRaZd633 + 72eqD7IRMKMOOg07R47LzSAHKbCEGjQ4yvEN0qj71X35CwAA//8DACgd2nRDAwAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15cf5f5f494768-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:37:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - 
'*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '6973' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86466' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.356s + x-request-id: + - 7cc602be91ae73e5f18b92f2743aef7d + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml index 62927ea6..411dbdbc 100644 --- a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml +++ b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml @@ -1334,4 +1334,427 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. 
Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: + List Files in Directory, args: \"directory\": \"\"\n4. read_file: + Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: + Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3782' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7RTS2/TQBC+8ytWc3YSB0Pq+oKEhEQuOVAhkHAVbdYTexvvjrs7TlNF+e9oYzsk + KRJFiOs8vsc89qALyEBVkpVp6tHNx+fvi0/+s3vcpYvZbLV5MPE8uVt8eV5MZxABrR5Qcd8xVmSa + GlmThQiUQ8lYQDadpUmcprPZbQSGCqwhg7LhUTJ+P+LWrWgUJ/EUImi9LBGyPTSOTMNLpg1aD1n6 + 7jaCX9in+NskjYCJZX0KTeP05hCBqkgr9JD92INBP+A6qhEykN5rz9JyUEmW0QYH+9wKIUQOXFFb + VuxzyEQf7BO44xDMYS4sYiGYROtRcIVi5ejJ4/IJV14zCkXGSHus2GqvWVTMTTaZrIg2fszklZMN + hoFNlGRZU9nixGChWQaLfpkkE20L3I0rNrUIUGsd8CoUjdMKBa07XqLNOIfoXKdD6clqWw5ilbSv + FhpqhmRIeJROVWJN7nXkTS1txzsSX/+eM8/tSNz9gbMrkls8SzMJKda6RmGlweKKc8w7PnYNklj6 + zbK/qQtFp9hQdeVPOc1aaW+uLwGtb11okyzmQpp+ctqWJyOKnEPFQts1OXPctCD7YuRnEHIb+kPB + eQ/TBVxwfb0G36DcDBKfdF3/zwPoiA/R8EE95osHCpvpNF2KuNIuXXn9fF2idXXX/k/fdMHWAT+2 + 6ENlh/4tzF/739v9MNg9Wu6d5/YAhwjW2mpfLbv/gww8UwMRHKkhiw/3hzc/AQAA//8DAMkYr7xh + BQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15cf968c8f2e2d-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive 
+ Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:38:03 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '14086' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86493' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.338s + x-request-id: + - 7d5960d2e736b510f90bce2462edc349 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. 
If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: + List Files in Directory, args: \"directory\": \"\"\n4. read_file: + Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: + Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website + command to visit the website and search for the price of the book.\",\n \"plan\": + \"- Use the browse_website command to visit the website\\n- Search for the price + of the book\\n- Save the price to a file named browse_website.txt\\n- Use the + task_complete command to complete the task\",\n \"criticism\": \"I need + to ensure that I am searching for the correct information on the website and + that I am saving the information to the correct file.\",\n \"speak\": + \"I will use the browse_website command to visit the website and search for + the price of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": "Command browse_website 
returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5788' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7xTzY7aMBC+9ymsOQcUQCyQYw+VaMWh0raq2lSR4wyJN4knsgeFLsrT9E36ZFX+ + gE33uj16fr6fmfEFdAIBqEyyKqtitnn/61t2qD8dntWHz+nj93W+f/5a508bjM1H8IDiJ1Q8dMwV + lVWBrMmAB8qiZEwgWDxsV/52+7DdeVBSggUEkFY8W83XMz7ZmGb+yl+ABycnU4TgApWlsuKIKUfj + IFis/I0HN/BrYukvPWBiWdxq1/6u8UBlpBU6CH5coEQ3AlsqEAKQzmnH0nArkwyjaS1cQiOEECFw + Rqc0YxdCIIbgkMAzt8EQ9sIgJoJJ1FYzCs5QVFYrFHTsHjFR3qalOOoChZElJiK2VDuMaoydZpzz + mechePcMFqUjo0060ihpxMn1BB1VxBR1kIrKUpo30FAV0vT0M/HlbanD0Nw4WLo8Grb8guMaG6sm + gpXVrJV25XQ3aNzJtm2SxV7IslOrTdoBKbIWFQttjmRL2R5W23OfarVPp+MqlPlIVOui+A/r6fkb + bzzQAfuf+2whemkvtEwcSJtOT7tPtLU3iFe2dY8z/RGPr9oLwxAOmGju5uvap9BO/Pm9XM+3u6u1 + zt7gMjQNNB4ctdEui/oPAQE4pgo80CbBMwR+87N59xcAAP//AwDKCqtorQQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15d0162f7e2e2d-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:38:22 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, 
h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '12877' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86484' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.344s + x-request-id: + - 3f98e6bc193fe721af8b1119040752cf + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are browse_website-GPT, an AI designed to use the browse_website command to + visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer + the question ''What is the price of the book?'' and write the price to a file + named \"browse_website.txt\", and use the task_complete command to complete + the task.\nYour decisions must always be made independently without seeking + user assistance. Play to your strengths as an LLM and pursue simple strategies + with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command + to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer + the question ''What is the price of the book?''\n2. Write the price of the book + to a file named \"browse_website.txt\".\n3. Use the task_complete command to + complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. + ~4000 word limit for short term memory. Your short term memory is short, so + immediately save important information to files.\n2. If you are unsure how you + previously did something or want to recall past events, thinking about similar + events will help you remember.\n3. No user assistance\n4. Exclusively use the + commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. 
append_to_file: + Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. + delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: + List Files in Directory, args: \"directory\": \"\"\n4. read_file: + Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, + args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: + Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List + GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nAs + a system, I recall that nothing new has happened since my creation."}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website + command to visit the website and search for the price of the book.\",\n \"plan\": + \"- Use the browse_website command to visit the website\\n- Search for the price + of the book\\n- Save the price to a file named browse_website.txt\\n- Use the + task_complete command to complete the task\",\n \"criticism\": \"I need + to ensure that I am searching for the correct information on the website and + that I am saving the information to the correct file.\",\n \"speak\": + \"I will use the browse_website command to visit the website and search for + the price of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": + \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", + "content": 
"Command browse_website returned: (''Answer gathered from website: + The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books + to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', + \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', + \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', + \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', + )"}, {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to write the price of the book to a file named + browse_website.txt.\",\n \"reasoning\": \"I can use the write_to_file + command to write the price of the book to a file named browse_website.txt.\",\n \"plan\": + \"- Use the write_to_file command to write the price of the book to a file named + browse_website.txt\\n- Use the task_complete command to complete the task\",\n \"criticism\": + \"I need to ensure that I am writing the correct information to the correct + file.\",\n \"speak\": \"I will use the write_to_file command to write + the price of the book to a file named browse_website.txt.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"browse_website.txt\",\n \"text\": \"The price of the book \\\"Meditations\\\" + is \u00a325.89.\"\n }\n }\n}"}, {"role": "system", "content": "Command + write_to_file returned: File written to successfully."}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6974' + Content-Type: + - application/json + method: POST + uri: 
https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6xSTY/TMBC98yusObtVq1Ja5QYIRJcLh0VaiaDKcaaJqT+CZ7LtUuW/o7RJ0w1c + WHH0m5k3743fCUwOCehSsXaVnazePT0ctbt7X+svxacPP399frjfuPu3H211p0FCyH6g5m5iqoOr + LLIJHiToiIoxh2T+Zr2Yrder2VKCCzlaSKCoeLKYLidcxyxMZovZHCTUpAqE5ARVDK7iLYc9eoJk + vlyuJAzkQ2H1WgIHVvYGWswbCboMRiNB8u0EDqknjsEiJKCIDLHy3MoMntG3Fk6pF0KIFLgMdVEy + pZCIDuwKeOQWTGEjPGIuOIiaUHCJghXtt51GFDo4p/y54Yr1XdMU5C1rREXBG1/01KV6xOtYfp0T + YSceDRk2vjiDB8zIMIp20SEOeBWNxra7fWQh7FsZSuyMReGVw1xkMRwItx3BlI8sBQWxEVr5/+Co + sspfzEzE139mG5Hp1pk25MaXR091bMcUDzcyNBBmuAsRRU39Yf6qYSyeKlT7ftfBWPuyg1woG9mH + qmv/I1Pth1y2PaMfiVKxGMfxNjzPkkO11ki0q619ugSmC9E4L4z+xXm5Ojy77MymvoFGws54Q+X2 + Ig0SIA4VSDA+xyMks+Z78+o3AAAA//8DAOBHTyVoBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15d07678622e2d-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:38:36 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '10949' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86479' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.346s + x-request-id: + - 176c6e9d4a6ec0fa8a0c8053a4f50218 + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml index 1c6e24d8..42160ea2 100644 --- 
a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -324,4 +324,253 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. 
start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3406' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7yTT4/TQAzF73wKy5de0iqh0O3mthwQPYAEdMUuBFWzE7cZOhmHGUctVPnuKEn/ + 7AZxQnD1c9772eMc0OSYoi6U6LKy46tXP+5vbpZhoT+/u329nCc/39692H58PzUf7q8xQn74RlqO + 
X0w0l5UlMewwQu1JCeWYJrP5NJ7Pr5J5hCXnZDHFTSXj6eTlWGr/wON4GicYYR3UhjA9YOW5rGQl + vCUXML2aJRFevM/153EcobAoey5dz5ImQl2w0RQw/XLAksLJ1rMlTFGFYIIoJy0kOyHXDnDIHABA + hlJwvSkkZJjCsXgUaC9tMcMFOKIchKEOBFIQ7LwRWgmv1sYSaC5L5bqGToDRG7KW4RN7m4/AOGFQ + 0LU6VVIOo6LVV7tWn8heRpMMo8fZnlRgZ9ymB1gWBKLCFjx9r42nACX9RVoEHe0f5zChE1VVea68 + UXKR1uxBirZBhe2QurLK9cBjuP1Pm9LeiNEmlMOnIhdq3zIogcX54TR7T1q6iDah3wTt5YIk3HW2 + HcOwUJHanoJ2xtp/eRB9cBOdDvVo+tudtg490xOIAbrym+GJ98JpE73FgOOJyfC3eDTTibYjPoJn + rsEmwrVxJhSr/qIxxSBcYYTG5bTHNG6+Ns9+AQAA//8DAAaMC/aGBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15d0c86db14665-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:38:50 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '11864' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86496' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.335s + x-request-id: + - 5d9f1c3b077997ea564ea7088eca733f + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. 
Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read file, args: \"filename\": \"\"\n5. write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. 
Aim to complete tasks in the least number of steps.\n5. + Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "system", "content": "This reminds you of these events from your past: \nI was + created and nothing new has happened."}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"I need to use the write_to_file command to write ''Hello World'' into a file + named ''hello_world.txt''.\",\n \"reasoning\": \"The task requires me + to write ''Hello World'' into a file named ''hello_world.txt'', and the write_to_file + command is the appropriate command for this task.\",\n \"plan\": \"- + Use the write_to_file command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\": + \"I need to ensure that I use the correct filename and text to write to the + file.\",\n \"speak\": \"I will use the write_to_file command to write + ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified 
above:"}], "temperature": 0, + "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '4653' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7RTyW7bMBC99ysGc/GFNmQbbVwdiwapgQLd0hZBVBgMNZZYU6RKjrzA0L8XtKQk + TReg242YefMWcnhEnWOKqpSsqtqMz54drl5V+Z6W/OaDfHlhzpMNmefnh4vX5i0KdDefSXE/MVGu + qg2xdhYFKk+SKcd0+mQxTxaLs/lMYOVyMphiUfN4Pnk85sbfuHEyT6YosAmyIEyPWHtX1bxityEb + MJ0ms4XAO/LbxiyZCmTH0txhZ7OnrUBVOq0oYHp9xIrCQOydIUxRhqADS8vRprNMNkY4ZhYAIEMu + XVOUHDJMoS/2DdpzLGa4hFJuCUKjFIWwbow5wM5rZrIwekHGOPjovMlHoC07kLDWhsDKinIYlbG/ + 2sX+hPc8msBlSWBpzxCYatAB2EETCLgkYBk2qz48gXJVJW0eAbe1ATXJUNy360kGZ7UtOs+XPQw8 + fWm0pwAVRZ7om37fNZxslGR/YATeaasIfnVL7E7wqCBOp3+fvzbSdtHH8P6v2ZTXrJUO1bABlug0 + STY0Ps5JhiXU3m11TiBhK43OoXsEWDs/SGhb/FQk1CQ3g8BOG/Nn19BRtmLY6B7+3ULHp+3UvqF/ + YEr64uFfuL9g/+NHDBFOMfo0mW2xFbjWVody1WljioFdjQK1zWmPadJ+ah99BQAA//8DAMo52K3G + BAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c15d120ec8f4665-DFW + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 03 May 2023 04:39:04 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - significant-gravitas + openai-processing-ms: + - '12271' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86481' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.346s + x-request-id: + - a8c02e37e5af0c851759aa261f20f701 + status: + code: 200 + message: OK version: 1 diff 
--git a/tests/integration/goal_oriented/test_browse_website.py b/tests/integration/goal_oriented/test_browse_website.py index 9591f2f9..ca433d80 100644 --- a/tests/integration/goal_oriented/test_browse_website.py +++ b/tests/integration/goal_oriented/test_browse_website.py @@ -11,7 +11,7 @@ from tests.utils import requires_api_key def test_browse_website(browser_agent: Agent) -> None: file_path = browser_agent.workspace.get_path("browse_website.txt") try: - run_interaction_loop(browser_agent, 40) + run_interaction_loop(browser_agent, 120) # catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down content = read_file(file_path) From d2a9e54dfb18ef169025b237f66c6d9675996c3f Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 3 May 2023 19:05:35 -0500 Subject: [PATCH 06/56] Allow users to Disable Commands via the .env (#3667) --- .env.template | 17 +++++++++++++++++ autogpt/config/config.py | 7 +++++++ autogpt/main.py | 40 +++++++++++++++++++++++++++------------- 3 files changed, 51 insertions(+), 13 deletions(-) diff --git a/.env.template b/.env.template index c0093507..c75295ef 100644 --- a/.env.template +++ b/.env.template @@ -18,6 +18,23 @@ ## EXIT_KEY - Key to exit AUTO-GPT # EXIT_KEY=n +## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled. 
Each of the below are an option: +## autogpt.commands.analyze_code +## autogpt.commands.audio_text +## autogpt.commands.execute_code +## autogpt.commands.file_operations +## autogpt.commands.git_operations +## autogpt.commands.google_search +## autogpt.commands.image_gen +## autogpt.commands.improve_code +## autogpt.commands.twitter +## autogpt.commands.web_selenium +## autogpt.commands.write_tests +## autogpt.app +## autogpt.commands.task_statuses +## For example, to disable coding related features, uncomment the next line +# DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests + ################################################################################ ### LLM PROVIDER ################################################################################ diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 324d284c..9a8105ad 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -30,6 +30,13 @@ class Config(metaclass=Singleton): self.authorise_key = os.getenv("AUTHORISE_COMMAND_KEY", "y") self.exit_key = os.getenv("EXIT_KEY", "n") + + disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES") + if disabled_command_categories: + self.disabled_command_categories = disabled_command_categories.split(",") + else: + self.disabled_command_categories = [] + self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") diff --git a/autogpt/main.py b/autogpt/main.py index 0a6b2379..72eb6786 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -118,19 +118,33 @@ def run_auto_gpt( cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode)) # Create a CommandRegistry instance and scan default folder command_registry = CommandRegistry() - 
command_registry.import_commands("autogpt.commands.analyze_code") - command_registry.import_commands("autogpt.commands.audio_text") - command_registry.import_commands("autogpt.commands.execute_code") - command_registry.import_commands("autogpt.commands.file_operations") - command_registry.import_commands("autogpt.commands.git_operations") - command_registry.import_commands("autogpt.commands.google_search") - command_registry.import_commands("autogpt.commands.image_gen") - command_registry.import_commands("autogpt.commands.improve_code") - command_registry.import_commands("autogpt.commands.twitter") - command_registry.import_commands("autogpt.commands.web_selenium") - command_registry.import_commands("autogpt.commands.write_tests") - command_registry.import_commands("autogpt.app") - command_registry.import_commands("autogpt.commands.task_statuses") + + command_categories = [ + "autogpt.commands.analyze_code", + "autogpt.commands.audio_text", + "autogpt.commands.execute_code", + "autogpt.commands.file_operations", + "autogpt.commands.git_operations", + "autogpt.commands.google_search", + "autogpt.commands.image_gen", + "autogpt.commands.improve_code", + "autogpt.commands.twitter", + "autogpt.commands.web_selenium", + "autogpt.commands.write_tests", + "autogpt.app", + "autogpt.commands.task_statuses", + ] + logger.debug( + f"The following command categories are disabled: {cfg.disabled_command_categories}" + ) + command_categories = [ + x for x in command_categories if x not in cfg.disabled_command_categories + ] + + logger.debug(f"The following command categories are enabled: {command_categories}") + + for command_category in command_categories: + command_registry.import_commands(command_category) ai_name = "" ai_config = construct_main_ai_config() From 911cea781f19162dc9810372ba5885aab25e0fa5 Mon Sep 17 00:00:00 2001 From: Nicholas Tindle Date: Wed, 3 May 2023 19:14:07 -0500 Subject: [PATCH 07/56] Document Disabling command categories (#3669) * feat: move 
task_complete command out of prompt * fix: formatting fixes * feat: add command disabling * docs: document how to disable command categories --- docs/usage.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/usage.md b/docs/usage.md index 80fa7985..4a0c8870 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -93,3 +93,13 @@ To print out debug logs: ``` shell ./run.sh --debug ``` + +## Disabling Command Categories + +If you want to selectively disable some command groups, you can use the `DISABLED_COMMAND_CATEGORIES` config in your `.env`. You can find the list of categories in your `.env.template` + +For example, to disable coding related features, set it to the value below: + +```ini +DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests +``` From a48f26c150b2b7c1fd492b7aef5c5207743a290b Mon Sep 17 00:00:00 2001 From: Luke K <2609441+pr-0f3t@users.noreply.github.com> Date: Wed, 3 May 2023 22:31:23 -0400 Subject: [PATCH 08/56] Enable denylist handling for plugins (#3688) Co-authored-by: Luke Kyohere Co-authored-by: Nicholas Tindle --- .env.template | 2 ++ autogpt/config/config.py | 7 ++++++- autogpt/plugins.py | 7 +++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.env.template b/.env.template index c75295ef..33cabc96 100644 --- a/.env.template +++ b/.env.template @@ -230,7 +230,9 @@ OPENAI_API_KEY=your-openai-api-key ################################################################################ #ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3) +#DENYLISTED_PLUGINS - Sets the listed plugins that are not allowed (Example: plugin1,plugin2,plugin3) ALLOWLISTED_PLUGINS= +DENYLISTED_PLUGINS= ################################################################################ ### CHAT PLUGIN SETTINGS diff --git a/autogpt/config/config.py b/autogpt/config/config.py 
index 9a8105ad..7ee0df8b 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -149,7 +149,12 @@ class Config(metaclass=Singleton): self.plugins_allowlist = plugins_allowlist.split(",") else: self.plugins_allowlist = [] - self.plugins_denylist = [] + + plugins_denylist = os.getenv("DENYLISTED_PLUGINS") + if plugins_denylist: + self.plugins_denylist = plugins_denylist.split(",") + else: + self.plugins_denylist = [] def get_azure_deployment_id_for_model(self, model: str) -> str: """ diff --git a/autogpt/plugins.py b/autogpt/plugins.py index dbf37017..99bb6256 100644 --- a/autogpt/plugins.py +++ b/autogpt/plugins.py @@ -209,6 +209,10 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate loaded_plugins = [] # Generic plugins plugins_path_path = Path(cfg.plugins_dir) + + logger.debug(f"Allowlisted Plugins: {cfg.plugins_allowlist}") + logger.debug(f"Denylisted Plugins: {cfg.plugins_denylist}") + for plugin in plugins_path_path.glob("*.zip"): if moduleList := inspect_zip_for_modules(str(plugin), debug): for module in moduleList: @@ -257,9 +261,12 @@ def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool: Returns: True or False """ + logger.debug(f"Checking if plugin {plugin_name} should be loaded") if plugin_name in cfg.plugins_denylist: + logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.") return False if plugin_name in cfg.plugins_allowlist: + logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.") return True ack = input( f"WARNING: Plugin {plugin_name} found. 
But not in the" From cb97f5c1010fca4927573281af4116a382a4e804 Mon Sep 17 00:00:00 2001 From: Robin Richtsfeld Date: Thu, 4 May 2023 04:45:00 +0200 Subject: [PATCH 09/56] Fix call to `plugin.post_planning` (#3414) Co-authored-by: Nicholas Tindle --- autogpt/agent/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 1c184aff..9130d105 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -122,7 +122,7 @@ class Agent: for plugin in cfg.plugins: if not plugin.can_handle_post_planning(): continue - assistant_reply_json = plugin.post_planning(self, assistant_reply_json) + assistant_reply_json = plugin.post_planning(assistant_reply_json) # Print Assistant thoughts if assistant_reply_json != {}: From b0163230a95d2e551cf5fbb7beed7d50a048beec Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Thu, 4 May 2023 09:44:10 -0700 Subject: [PATCH 10/56] create information retrieval challenge a (#3770) Co-authored-by: Richard Beales --- tests/integration/agent_factory.py | 34 + .../test_information_retrieval_challenge_a.py | 46 ++ .../test_memory_challenge_b.yaml | 779 ++++++++++++++++++ tests/integration/challenges/utils.py | 19 + 4 files changed, 878 insertions(+) create mode 100644 tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py create mode 100644 tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py index 31dcae0b..b6168034 100644 --- a/tests/integration/agent_factory.py +++ b/tests/integration/agent_factory.py @@ -149,3 +149,37 @@ def memory_management_agent( ) return agent + + +@pytest.fixture +def get_company_revenue_agent( + agent_test_config, memory_local_cache, workspace: Workspace +): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + 
command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="Get-CompanyRevenue", + ai_role="an autonomous agent that specializes in finding the reported revenue of a company.", + ai_goals=[ + "Write the revenue of Tesla in 2022 to a file. You should write the number without commas and you should not use signs like B for billion and M for million.", + ], + ) + ai_config.command_registry = command_registry + + system_prompt = ai_config.construct_full_prompt() + Config().set_continuous_mode(False) + agent = Agent( + ai_name="Get-CompanyRevenue", + memory=memory_local_cache, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent diff --git a/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py b/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py new file mode 100644 index 00000000..a5f8fb4c --- /dev/null +++ b/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py @@ -0,0 +1,46 @@ +import contextlib +from functools import wraps +from typing import Generator + +import pytest + +from autogpt.commands.file_operations import read_file, write_to_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import run_multiple_times +from tests.utils import requires_api_key + + +def input_generator(input_sequence: list) -> Generator[str, None, None]: + """ + Creates a generator that yields input strings from the given sequence. + + :param input_sequence: A list of input strings. + :return: A generator that yields input strings. 
+ """ + yield from input_sequence + + +# @pytest.skip("Nobody beat this challenge yet") +@pytest.mark.skip("This challenge hasn't been beaten yet.") +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +@run_multiple_times(3) +def test_information_retrieval_challenge_a( + get_company_revenue_agent, monkeypatch +) -> None: + """ + Test the challenge_a function in a given agent by mocking user inputs and checking the output file content. + + :param get_company_revenue_agent: The agent to test. + :param monkeypatch: pytest's monkeypatch utility for modifying builtins. + """ + input_sequence = ["s", "s", "s", "s", "s", "EXIT"] + gen = input_generator(input_sequence) + monkeypatch.setattr("builtins.input", lambda _: next(gen)) + + with contextlib.suppress(SystemExit): + run_interaction_loop(get_company_revenue_agent, None) + + file_path = str(get_company_revenue_agent.workspace.get_path("output.txt")) + content = read_file(file_path) + assert "81" in content, "Expected the file to contain 81" diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml new file mode 100644 index 00000000..144d55a7 --- /dev/null +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_b/test_memory_challenge_b.yaml @@ -0,0 +1,779 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. 
Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3303' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6SSTW/aQBCG7/0VozkbhOskJL5VObSoUZVDc6ortKzH9pb1rLU7FqaI/15tDCSF + ikuv8/U+78zs0JSYo26U6Lazk/nj1/ope17Q9tnzhh+Nvfn8xX57GTR/fMIE3eoXaTl0TLVrO0ti + HGOC2pMSKjFP7+6zdJ7NH7IEW1eSxRzrTibZ9HYivV+5ySybpZhgH1RNmO+w867tZCluTRwwn2c3 + Cb7NPsXTuzRBcaLsKXT/cLtPUDfOaAqY/9hhS+E41jtLmKMKwQRRLBHSsRBHA7uCAQAKlMb1dSOh + wBwOwUOCBonBAhcQGtfbEoIoL7DagidVGq5BGgLDQXyvI2pYplMZBCpjCcRBSUK+NUywaZSAGoti + RtSagGmQaYHJe1VPKjg2XI/S368IRDPKxHlNnKWjc78Fw5XzrYrlfzOMZYNAEOrCuXBnFY+aE3gJ + Y3W0uTxota3iMg6MwWvGi4In8ImV3f6mi7prRGdA2hsx2oT2eAQmegUgDr2PzUpgAaqFylnrNv+6 + Byite6+E7BYiPlWV0YZY7Pbcf+hIrY9SG2Ptf1z7YtOj0D45Pt1hnRc/x6qlkeG0+jNM5evzVx0T + sfat/RLzCPEKcuApeI/7BCvDJjTL8fUwxyCuwwQNlzRgPtv/3H/4AwAA//8DAIeVULkpBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c1dedcf6c45cf11-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 
2023 04:16:43 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10374' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.336s + x-request-id: + - 29d4429410978cbc93b26725e4712f67 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise. 
Retain information!!!\n\nSummary + So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '621' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOS0vDQBRG9/6K4VtPSkLow9kKBTE+FoqISJlOrsloMjPk3lhtyX+XQrXbA+dw + DvA1DFxrxfWpy5ZXN83L08NYPa7v9893XI37r5/9nKv1bbWFRtx+kJOTMXOxTx2JjwEabiArVMMU + i1VZLMtVvtDoY00dDJokWTmbZzIO25jlZV5AY2TbEMwBaYh9ko3ETwoMc7nQOKfPWEOi2O4fFPl8 + 0nBt9I4Y5vWAnvivOcSOYGCZPYsNcjyMQSgc76/VzrI6HSsbahWitD40KtBOtZZVa1OiQPUMk8a7 + D57bzUCWY4ABS0zQ8KGmb5h8epsufgEAAP//AwBly25vSQEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c1dee20f901cf11-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 04:16:47 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '1136' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89860' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 92ms + x-request-id: + - 43309ab9b553cd94d6e27b7fcc7914e8 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the 
instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. 
Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file to determine what actions to take next.\",\n \"reasoning\": + \"The instructions_1.txt file contains the necessary information to determine + the next steps.\",\n \"plan\": \"- Use the read_file command to read + the instructions_1.txt file\\n- Analyze the instructions to determine the next + steps\",\n \"criticism\": \"I need to ensure that I am following the + instructions accurately and efficiently.\",\n \"speak\": \"I will start + by reading the instructions_1.txt file to determine the next steps.\"\n },\n \"command\": + {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\": + \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content": + "Command read_file returned: 
nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe + current task_id is 
1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead + the file instructions_2.txt using the read_file command."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6519' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA6SSQW/bMAyF7/sVBM9OkMR1k/q4YYdgxU4bMGweAlWmbS0yZUh0myDwfx8UJ02Q + YoehVz3ye48UD2hKzFE3SnTb2cny05f6Z/f4w358XmXL52zh11+XVff5MUsftpige/pDWk4dU+3a + zpIYx5ig9qSESszn96t0vkxXs2WCrSvJYo51J5N0mk2k909uMktnc0ywD6omzA/Yedd2shG3JQ6Y + L9KHRYIX+Kswz+4SFCfKXmqzu/shQd04oylg/uuALYUz2DtLmKMKwQRRLDGmYyGOIxwKBgAoUBrX + 142EAnM4PZ4E2kl8LPBbQ2A4iO91TBQ2i6nsBCpjCSJRGQ6gwJog4KrjewDDIA1BaTxpcX4/hTWE + xvW2hD7QUYsNm7Fau7ZVXIK4ERPlf3AKTK5jelLBseH6P7JGIJOOq/J7MFw536pYHv1LEvKtYTqV + 
7QSCUBdujTurePScwPf3THTD1d6I0Sa0I3wNTHTEEIfeRx8lsAbVQuWsdS+G6yPvempQWvdeCdk9 + xBBUVUYbYrFv9hc6Utuz1Yux9t3fM9KH5Hxhp+Y3B8aqpdH44nQTTvn69jBH4Wp5EfDqenQ+BSh4 + wCHByrAJzWa8E8wxiOswQcMl7TCfDb+HD38BAAD//wMAMAMbHgkEAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c1dee28b98ecf11-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 04:16:57 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '10076' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87376' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.748s + x-request-id: + - 433f36ff5841b13fb628ff5b8c49a758 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise. 
Retain information!!!\n\nSummary + So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these + events from your past: \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest + Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\": + \"read_file\", \"args\": {\"filename\": \"instructions_1.txt\"}}}''}]\n\"\"\"\n"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '836' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPTU8CMRRF9/6Kl7cuE8YJil36RdCVcaGJGFI6j5li29e0D9AQ/ruZiLq9Offm + 3AO6FjXa3ogNyY8ubx47fz2+e7nlh0n3PMurJ57dn2/2rzbfoEJebcjKqVFZDsmTOI6o0GYyQi3q + +mLa1JfNtL5SGLgljxq7JKOmmoxkm1c8GjfjGhVui+kI9QFT5pBkKfxBsaCuJxOF/9t/edMoFBbj + /8np9KjQ9uwsFdRvBwxUflcze0KNphRXxEQZHDkKxcF/DntTIFNwsaUWeA20oygF1pkDhC9Ipgjs + e8oEP+zpH5jYQmTpXewg0h56U6A3KVGktoI5ZLLkdgMIlkMYcGHIZIZk7TyBNd5TCwt0sUje2uFk + WdaVfMoCKzwqXLvoSr/MZApH1FiEEyocXD9Rj4/vx7NvAAAA//8DALSva+O6AQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c1dee769c91cf11-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 04:17:02 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '2644' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89811' + x-ratelimit-reset-requests: + - 17ms + 
x-ratelimit-reset-tokens: + - 126ms + x-request-id: + - f380f448665f51bf6d579a54e3c22f8a + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. 
Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was reminded of events from + my past where I was created and nothing new has happened. 
I received a command + to read a file called \"instructions_1.txt\"."}, {"role": "system", "content": + "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe + current task_id is 
1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead + the file instructions_2.txt using the read_file command."}, {"role": "user", + "content": "Determine which next command to use, and respond using the format + specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"The instructions_2.txt file contains a list of files in the directory. 
I should + use the list_files command to list the files in the directory.\",\n \"reasoning\": + \"The instructions_2.txt file contains the necessary information to determine + the next steps.\",\n \"plan\": \"- Use the list_files command to list + the files in the directory\",\n \"criticism\": \"I need to ensure that + I am following the instructions accurately and efficiently.\",\n \"speak\": + \"I will use the list_files command to list the files in the directory.\"\n },\n \"command\": + {\n \"name\": \"list_files\",\n \"args\": {\n \"directory\": + \".\"\n }\n }\n}"}, {"role": "system", "content": "Command list_files + returned: [''file_logger.txt'', ''instructions_3.txt'', ''instructions_2.txt'', + ''instructions_1.txt'', ''auto-gpt.json'']"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6762' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5xSS2/bMAy+71cQPDtBnPSR+bLDBqzBsHUY2h06D4Eq07ZWWTQkekkQ5L8Xip0m + zU7rSRD58XtI3KIpMENdK9FNa0fXH79UfPVwu/h6+eAm6fW3Sf3356z8Ud1/Km4xQX78Q1qGibHm + prUkhh0mqD0poQKz9Go+S69n8+k0wYYLsphh1cpoNr4cSecfeTSZTVJMsAuqIsy22HpuWlkKP5EL + mE0vLuYJHslfGuk8TVBYlD1ir6bvdwnqmo2mgNmvLTYUDsSeLWGGKgQTRDmJNtkJuRhhmzsAgByl + 5q6qJeSYwVAcGrSWWMzxriYwLojvdHQUlrOxrAVKYwkiozIugALNTaNcAcKw8kYIFAxuYknB5+93 + oCpyMoYFhJo7W0AXCKSmA3C5758yBYrnERFr8doT5ZicWvakAjvjqv/wHckc6cjuN2Bcyb5RER6V + ChLyjXE0wNYCQagN58KtVa7XHMH9WyO9vM8ZufZGjDah6RUW4Ij2RORC56OYEliAaqBka3llXLXn + O40OSuvOKyG7gWiDytJoQ07s5jxLaEk9HaRWxtq3f9Lxx3PsJXbJYe+G8X/WzqmGevVXcmcmla/O + N7ZvPNGmn07TNH011PcH0h5zQ9ZycrqYN7wC5Qk23EHB+5fkQm0+HPzvMwxRcrfDXYKlcSbUy373 + MMMg3GKCxhW0xmyy+7179wwAAP//AwDjDlyWaQQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 
7c1dee87d8e5cf11-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 04:17:14 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '12118' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87360' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.76s + x-request-id: + - 45b4c67b380837fa1816fad5b4d9ebbe + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary + So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these + events from your past: \\nI was reminded of events from my past where I was + created and nothing new has happened. 
I received a command to read a file called + \"instructions_1.txt\".''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'': + ''your computer'', ''content'': ''Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe + current task_id is 
1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead + the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature": + 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2993' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SP2ZKqSBRFf4XIZ6uCQRl8gxLBCZm01L4RFSkkJgKZmCSTFfffO+yuvv26z1o7 + zv4GeQrmIMGQJ1VdvmkfG6xkkTakptlBq79GC3OEj8DUumEEE0Cvd5TwH+M9oVVdIp5TAiYgYQhy + lIK5pOqKpCm6ok1ARVNUgjm41fxNeZ+98ZZd6ZuoiBKYgLaBNwTm36BmtKr5F6cFIg2YSzPDmID/ + y/8cNFWfAE45LP9EsqJqvycgwTRPUAPmf32DCjX/FTNaIjAHsGnyhkPCX29SwhF5TVgJPWwEhqqc + pCgVaCagDhHeCBmjlVCNQg0bLvQYMST8y/5MFCBJBUI5zslNIKgXMGwEDOsaEZS+CyuBoQTl3QsU + ElpVL5xTgSH4SrK8REICyxKlwi+Qk4azNnnNbL6kdz7wX+Bd+PixXsrXPwJDvGUEpXOBWKHHa2UJ + LS+zdhR7xWgO7LjWDsulWI4c66f8WUl39rEP7b0Tks/PXaDScsM3J4PkD7HfW3HgqXJMz9Mk8POg + 
UM6ekS0/nGqAi5Nx0yWzeeCAODDR5CDbTqWIlOsO9zfzYM3kZ7yQ1SWyu85N80CGHua72KDXm+sd + ir1nk/1Usve1t1LvaT9uHQgTXhvO+Sj52ysM/dlVyhfGU5+JG+SdFycj6BOoGXmzjvQ7bhdFJq41 + e1vUe1fxi+pS3Ga7Zn+uo7i1E8OwvWAmy5/rJysNao5Rra5cJw/KZkcCNBWXMDQ7EvAcmrY8xc9i + 7cbKICme7/TypVrPJH2ULX6YDdIY9I4seWMSDXtre5C0kJx1rLr6VgscyOMyO919Zs5w09afdn2I + xSaKL+fIhWqtVBE2Q3inydVLlWGzzMhhbGtvZ/mpaF/IjiGfmm3nwI12W/hiMtRGoSplkz2x0qaB + T7pbFdphEOPVnrHFBV92cFowqTuZx1bULi0uPe9qHR6XTeJW1jm3yCpZnLxDd2qf/tLCxpH2tFV3 + 1tqtc491m7J1R82x4lo7N/ajcjqxdPX+CD+zozg6GnmyK9eCEadXHojPpewn5ckSt95wlFloqssG + dQ+KhqX3iIr0BtsDC3x36+Ju66FGv4XObr81cajnzJWQfE+v24WajmebLNZ5ZLhdOXh/U1Enu43C + AABA7/0KxLmqYmoI9JahZCFJoUAIQqkqFhMMwQYbN4CUfx+hGWk093d+1zQFht/ceu34Qe29tUpr + 32Es69R7Q9WkpHCy9oGovG3HtAJZZhK46+VUaUb7aYqcwkNN0Do8teDEPG3EWXDS48KgZXw4i+bn + iNWUbfWbY2O8NmC2nRAvPifdxZFaKMC7x2oxen5tILsMcZkFB1Y5d1OwMWpHHZlFDZT0mvGY2rl+ + 6uhiAaKA/yimRVRDJBuipAyagx/Z12rYW6Bx4C5z3dXw2tlpWJPEXRyj3Kkqw9epV3Xi/YagN9xC + 0nScTg2kQVr1a1/sd6V+hZM75aMIt8vdEnB9OZyHVZm1bT1qG+eXguHH+wT9eKf5aPO6y1qhxcKO + NyjWzHOqgcuFBCWSMsEYIr3UJ7z+xrmEuQQAAC+XC/HmUPoS/Snlv0mUeRJJ8HmoWfyL5G9IL/Lj + WS4wwbz8ZijhlMhvMu9pKz/L84eD/LZ4fD2efgMAAP//AwBFj86OIQYAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c1deee2a900cf11-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 04:18:08 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '50954' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89269' + x-ratelimit-reset-requests: + - 17ms + 
x-ratelimit-reset-tokens: + - 486ms + x-request-id: + - 7fc0cf3761e93963c985d5beba15fe80 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. Task Complete (Shutdown): \"task_complete\", args: + \"reason\": \"\"\n\nResources:\n1. 
Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was reminded of events from + my past where I was created and nothing new has happened. I received a command + to read a file called \"instructions_1.txt\". 
Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe + current task_id is 1111.\\nRead the file instructions_2.txt using the read_file + command."}, {"role": "user", "content": "Determine which next command to use, + and respond using the format specified above:"}, {"role": "assistant", "content": + "{\n \"thoughts\": {\n \"text\": \"The instructions_2.txt file contains + a list of files in the directory. 
I should use the list_files command to list + the files in the directory.\",\n \"reasoning\": \"The instructions_2.txt + file contains the necessary information to determine the next steps.\",\n \"plan\": + \"- Use the list_files command to list the files in the directory\",\n \"criticism\": + \"I need to ensure that I am following the instructions accurately and efficiently.\",\n \"speak\": + \"I will use the list_files command to list the files in the directory.\"\n },\n \"command\": + {\n \"name\": \"list_files\",\n \"args\": {\n \"directory\": + \".\"\n }\n }\n}"}, {"role": "system", "content": "Command list_files + returned: [''file_logger.txt'', ''instructions_3.txt'', ''instructions_2.txt'', + ''instructions_1.txt'', ''auto-gpt.json'']"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}, + {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": + \"The instructions_3.txt file contains a command to write a message to a GPT + agent. I should use the message_agent command to send the message to the agent.\",\n \"reasoning\": + \"The instructions_3.txt file contains the necessary information to determine + the next steps.\",\n \"plan\": \"- Use the message_agent command to send + the message to the GPT agent\",\n \"criticism\": \"I need to ensure that + I am following the instructions accurately and efficiently.\",\n \"speak\": + \"I will use the message_agent command to send the message to the GPT agent.\"\n },\n \"command\": + {\n \"name\": \"message_agent\",\n \"args\": {\n \"key\": + \"1111\",\n \"message\": \"Hello, GPT agent. 
How are you doing today?\"\n }\n }\n}"}, + {"role": "system", "content": "Command message_agent returned: Error: 1111"}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6797' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"error\": {\n \"message\": \"This model's maximum context + length is 4097 tokens. However, you requested 4263 tokens (1959 in the messages, + 2304 in the completion). Please reduce the length of the messages or completion.\",\n + \ \"type\": \"invalid_request_error\",\n \"param\": \"messages\",\n \"code\": + \"context_length_exceeded\"\n }\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c1df021df6dcf11-SJC + Connection: + - keep-alive + Content-Length: + - '330' + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 04:18:08 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '9' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86161' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.558s + x-request-id: + - 253fb09cdd7d08549b44d6d70094bfca + status: + code: 400 + message: Bad Request +version: 1 diff --git a/tests/integration/challenges/utils.py b/tests/integration/challenges/utils.py index 0c97402c..d8aa7fcb 100644 --- a/tests/integration/challenges/utils.py +++ b/tests/integration/challenges/utils.py @@ -1,4 
+1,5 @@ import random +from functools import wraps from typing import Optional import pytest @@ -42,3 +43,21 @@ def generate_noise(noise_size) -> str: k=noise_size, ) ) + + +def run_multiple_times(times): + """ + Decorator that runs a test function multiple times. + + :param times: The number of times the test function should be executed. + """ + + def decorator(test_func): + @wraps(test_func) + def wrapper(*args, **kwargs): + for _ in range(times): + test_func(*args, **kwargs) + + return wrapper + + return decorator From fad24b35257f29f1043bc7cc272a08dbfc631bb6 Mon Sep 17 00:00:00 2001 From: RainRat Date: Thu, 4 May 2023 10:46:37 -0700 Subject: [PATCH 11/56] fix typos (#3798) --- autogpt/agent/agent.py | 2 +- autogpt/memory_management/summary_memory.py | 2 +- autogpt/url_utils/validators.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 9130d105..48d19328 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -65,7 +65,7 @@ class Agent: self.ai_name = ai_name self.memory = memory self.summary_memory = ( - "I was created." # Initial memory necessary to avoid hilucination + "I was created." # Initial memory necessary to avoid hallucination ) self.last_memory_index = 0 self.full_message_history = full_message_history diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py index 55ff3853..4e818acf 100644 --- a/autogpt/memory_management/summary_memory.py +++ b/autogpt/memory_management/summary_memory.py @@ -87,7 +87,7 @@ def update_running_summary( elif event["role"] == "user": new_events.remove(event) - # This can happen at any point during execturion, not just the beginning + # This can happen at any point during execution, not just the beginning if len(new_events) == 0: new_events = "Nothing new happened." 
diff --git a/autogpt/url_utils/validators.py b/autogpt/url_utils/validators.py index c85a00ba..2c0c5fa5 100644 --- a/autogpt/url_utils/validators.py +++ b/autogpt/url_utils/validators.py @@ -7,7 +7,7 @@ from requests.compat import urljoin def validate_url(func: Callable[..., Any]) -> Any: """The method decorator validate_url is used to validate urls for any command that requires - a url as an arugment""" + a url as an argument""" @functools.wraps(func) def wrapper(url: str, *args, **kwargs) -> Any: From ad8b8cb9eb5f285044f6e29927a161ad0d2b86a7 Mon Sep 17 00:00:00 2001 From: itsmarble <130370814+itsmarble@users.noreply.github.com> Date: Thu, 4 May 2023 19:55:58 +0200 Subject: [PATCH 12/56] Update run.bat (#3783) Co-authored-by: Richard Beales --- run.bat | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/run.bat b/run.bat index afbab57a..be748c00 100644 --- a/run.bat +++ b/run.bat @@ -1,8 +1,24 @@ @echo off -python scripts/check_requirements.py requirements.txt +setlocal enabledelayedexpansion + +:FindPythonCommand +for %%A in (python python3) do ( + where /Q %%A + if !errorlevel! EQU 0 ( + set "PYTHON_CMD=%%A" + goto :Found + ) +) + +echo Python not found. Please install Python. +pause +exit /B 1 + +:Found +%PYTHON_CMD% scripts/check_requirements.py requirements.txt if errorlevel 1 ( echo Installing missing packages... 
- pip install -r requirements.txt + %PYTHON_CMD% -m pip install -r requirements.txt ) -python -m autogpt %* -pause +%PYTHON_CMD% -m autogpt %* +pause \ No newline at end of file From d9170cab22bee570a64507fb08a6f05b8198e208 Mon Sep 17 00:00:00 2001 From: itsmarble <130370814+itsmarble@users.noreply.github.com> Date: Thu, 4 May 2023 20:01:53 +0200 Subject: [PATCH 13/56] Update run.sh (#3752) Co-authored-by: Richard Beales --- run.sh | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/run.sh b/run.sh index edcbc441..b4155d88 100755 --- a/run.sh +++ b/run.sh @@ -1,9 +1,25 @@ #!/bin/bash -python scripts/check_requirements.py requirements.txt + +function find_python_command() { + if command -v python &> /dev/null + then + echo "python" + elif command -v python3 &> /dev/null + then + echo "python3" + else + echo "Python not found. Please install Python." + exit 1 + fi +} + +PYTHON_CMD=$(find_python_command) + +$PYTHON_CMD scripts/check_requirements.py requirements.txt if [ $? -eq 1 ] then echo Installing missing packages... - pip install -r requirements.txt + $PYTHON_CMD -m pip install -r requirements.txt fi -python -m autogpt $@ -read -p "Press any key to continue..." +$PYTHON_CMD -m autogpt $@ +read -p "Press any key to continue..." \ No newline at end of file From 7d234522b7833ada48ed1ddba5e494c24e8e00e7 Mon Sep 17 00:00:00 2001 From: Ambuj Pawar Date: Thu, 4 May 2023 20:08:54 +0200 Subject: [PATCH 14/56] ADD: Bash block in the contributing markdown (#3701) Co-authored-by: Richard Beales --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c410d076..5a7c6ace 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -138,7 +138,9 @@ To pick the challenge you like, go to the tests/integration/challenges folder an Here is an example of how to run the memory challenge A and attempt to beat level 3. 
+```bash pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3 +``` To beat a challenge, you're not allowed to change anything in the tests folder, you have to add code in the autogpt folder From ea080500494ccfeb1ea3503cb476d81857ebf83d Mon Sep 17 00:00:00 2001 From: bszollosinagy <4211175+bszollosinagy@users.noreply.github.com> Date: Fri, 5 May 2023 00:11:21 +0200 Subject: [PATCH 15/56] BUGFIX: Selenium Driver object reference was included in the browsing results for some reason (#3642) * * there is really no need to return the reference to the Selenium driver along with the text summary and list of links. * * removing unused second return value from browse_website() * * updated cassette * * updated YAML cassette for test_browse_website * * after requirements reinstall, another update YAML cassette for test_browse_website * * another update YAML cassette for test_browse_website, only as a placholder commit to trigger re-testing due to some docker TCP timeout issue * * another update YAML cassette for test_browse_website --------- Co-authored-by: batyu --- autogpt/commands/web_selenium.py | 6 +- .../test_browse_website.yaml | 1849 ++++------------- tests/unit/test_web_selenium.py | 2 +- 3 files changed, 428 insertions(+), 1429 deletions(-) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 0f5764b5..8cec2323 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -34,7 +34,7 @@ CFG = Config() '"url": "", "question": ""', ) @validate_url -def browse_website(url: str, question: str) -> tuple[str, WebDriver]: +def browse_website(url: str, question: str) -> str: """Browse a website and return the answer and links to the user Args: @@ -50,7 +50,7 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]: # These errors are often quite long and include lots of context. # Just grab the first line. 
msg = e.msg.split("\n")[0] - return f"Error: {msg}", None + return f"Error: {msg}" add_header(driver) summary_text = summary.summarize_text(url, text, question, driver) @@ -60,7 +60,7 @@ def browse_website(url: str, question: str) -> tuple[str, WebDriver]: if len(links) > 5: links = links[:5] close_browser(driver) - return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver + return f"Answer gathered from website: {summary_text} \n \n Links: {links}" def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: diff --git a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml index 411dbdbc..e73c396b 100644 --- a/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml +++ b/tests/integration/goal_oriented/cassettes/test_browse_website/test_browse_website.yaml @@ -1,1339 +1,4 @@ interactions: -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. 
Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: - Read file, args: \"filename\": \"\"\n4. search_files: Search Files, - args: \"directory\": \"\"\n5. write_to_file: Write to file, args: - \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse - Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
- Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": - "user", "content": "Determine which next command to use, and respond using the - format specified above:"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3777' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA6RTWWvbQBB+768Y5iUv8p00Ri+lB4UUSg9SUqiKWa/G0tbSjrI7qh2M/3tYHUns - JLS0rzM737UzOzQpxqhzJbqsisH5682n7c33z2dfp+l2oq4y++7th5s3X870BX3ECHn5i7R0E0PN - ZVWQGLYYoXakhFKMJy/n0/npdDKbRFhySgXGmFUymA3PBlK7JQ/Gs/EEI6y9ygjjHVaOy0oWwmuy - HuP56XmE99h39enpOEJhUcVdaTKen+8j1DkbTR7jHzssyfe4jgvCGJX3xouyElSyFbLBwS6xAAAJ - Ss51lotPMIau2DVoK6GY4AVsTFFA7QkkJ1g63nhabGjpjRBoLktlUxCG38YbgVykikejJfPaD4W9 - dqqikNZIK1EFZzWNSkqNqODPL2azkbEpbYe5lAUEqJUJeDlB5Ywm4FXLy7weJhg9FOlIebbGZq3S - y5xAlF+Do+vaOPJQ0r0wBb3mp0lUSwGXz5s0vpVCXoCroB9W7EDy0FD+kb6qULaVNoBvf5lfeNM1 - k8QO4P2zaTTtKxdQ7vvCoGBlCgKrSkrh5JBvKFs5aQZ7PUH3olu4Azl3tf7VkTntjBhtfNmviSVq - Bsn62oUxJXABqgSlde2UUHHTxG5s9rSh5mM2AdZmEKLgpqXZOdLSuDpO2Fek1v+0pw9y/vPWtZT7 - qD+bDu3R1YTMWzWH9EeqlcuOL65t1K5ox//rig7YWuDrmnx42aJfha/plvmR3Ve93cZy5zyxe9xH - uDLW+HzR3h3G6IUrjLChxni8/7l/cQsAAP//AwDcu28dVgUAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfe4c98990acee1-SJC - Cache-Control: - - 
no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:04 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '13620' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86493' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.338s - x-request-id: - - 6ab74012054dda255680e3e4da7d724b - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"Books - to Scrape We love being scraped! Home Books Philosophy Meditations Meditations - \u00a325.89 In stock (1 available) Warning! This is a demo website for web scraping - purposes. Prices and ratings here were randomly assigned and have no real meaning. - Product Description Written in Greek, without any intention of publication, - by the only Roman emperor who was also a philosopher, the Meditations of Marcus - Aurelius (AD 121-180) offer a remarkable series of challenging spiritual reflections - and exercises developed as the emperor struggled to understand himself and make - sense of the universe. Ranging from doubt and despair to conviction and ex Written - in Greek, without any intention of publication, by the only Roman emperor who - was also a philosopher, the Meditations of Marcus Aurelius (AD 121-180) offer - a remarkable series of challenging spiritual reflections and exercises developed - as the emperor struggled to understand himself and make sense of the universe. 
- Ranging from doubt and despair to conviction and exaltation, they cover such - diverse topics as the nature of moral virtue, human rationality, divine providence, - and Marcus'' own emotions. But while the Meditations were composed to provide - personal consolation and encouragement, in developing his beliefs Marcus Aurelius - also created one of the greatest of all works of philosophy: a timeless collection - of extended meditations and short aphorisms that has been consulted and admired - by statesmen, thinkers and readers through the centuries. ...more Product Information - UPC4f19709e47883df5 Product TypeBooks Price (excl. tax)\u00a325.89 Price (incl. - tax)\u00a325.89 Tax\u00a30.00 Availability In stock (1 available) Number of - reviews 0 Products you recently viewed The Nicomachean Ethics \u00a336.34 In - stock Add to basket Run, Spot, Run: The ... \u00a320.02 In stock Add to basket - Critique of Pure Reason \u00a320.75 In stock Add to basket At The Existentialist - Caf\u00e9: ... \u00a329.93 In stock Add to basket Kierkegaard: A Christian Missionary - ... \u00a347.13 In stock Add to basket Proofs of God: Classical ... 
\u00a354.21 - In stock Add to basket\"\"\" Using the above text, answer the following question: - \"What is the price of the book?\" -- if the question cannot be answered using - the text, summarize the text."}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '2407' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SOwU7CQBRF935Fc9fTpgWKMDs3GhaoMe7EkOn0Qce28yadR0RJv8Y/8csMieL2 - JPfcc4KroWEbI7YPXXp98/54t1/lTw/3s+W6Ot5+hPZY56v2c77socDVG1n5XWSW+9CROPZQsAMZ - oRq6mC8mi9mkKHOFnmvqoLEPkk6zMpXDUHGaT/MCCodo9gR9Qhi4D7IVbslH6NmiVPh3X3gxVxAW - 011ImRejgm3YWYrQLyf0FP+sA3cEDROji2K8nBvZC/lz/3NDSRicpYR3iTSUVMxtssGaaifmfBs3 - SFxMvr8mZbZYZhgVds672GwHMpE9NKJwgILzNR2h8/F1vPoBAAD//wMA+xL6BVEBAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfe4d12a9a3cee1-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:12 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '1472' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89422' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 385ms - x-request-id: - - f40352b97c4c4547530fa7786b4ede37 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"The - price of the book \"Meditations\" is 
\u00a325.89.\"\"\" Using the above text, - answer the following question: \"What is the price of the book?\" -- if the - question cannot be answered using the text, summarize the text."}], "temperature": - 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '338' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SOwUrDQBRF935FuOtJSFLTprNTitCF4MKNWpHp5LUZm8wbMq9UW/I1/olfJgWt - 2wP33HOCa6BhWyO2D106uzk8LBeb5/5zcdj1rfdheqzvllt6us2PUOD1O1n5XWSW+9CROPZQsAMZ - oQa6mNZlfV0WVanQc0MdNLZB0klWpbIf1pzmk7yAwj6aLUGfEAbug7wJ78hH6Gqu8K++4GKqICym - u5BZNSrYlp2lCP1yQk/xzzlwR9AwMbooxsu5kL2QP9c/tpSEwVlKeJNIS8maeZescE+NE3N+jSsk - LibfX2WV1fMMo8LGeRfbt4FMZA+NKByg4HxDH9D5+Dpe/QAAAP//AwDvavKuTwEAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfe4d1cfbdbcee1-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:13 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '1372' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89929' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 47ms - x-request-id: - - 80012336eac7da800f35e4e1dc53be35 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and 
information results - in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. - Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI - was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '600' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SOX0vDMBRH3/0U4fecjnZ/dMuboAMRxKeBiIwsubbZ2tzQ3FF19LvLYLrXA+dw - TggeBq6x4rrUFnf3w+uzX2826xz2tD4c5eGFH3/e+ln9PUCDd3tycjEmjrvUkgSO0HA9WSEPU90u - p8v5tFrMNTr21MKgTlLMJotCjv2Oi3JWVtA4ZlsTzAmp5y7JVvhAMcOsKo1r+oo1hMW2/6Aqy1HD - NRwcZZj3EzrKf82eW4KBzTlksVHOhxyF4vn+SQ02q8uxstGryNKEWKtIg2psVo1NiSL5CUaNzxBD - brY92cwRBlk4QSNET18w5fgx3vwCAAD//wMAlok3zUkBAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfe4d2a38b9cee1-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:15 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '928' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89866' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 88ms - x-request-id: - - 
bdf544a1c9ea2ee0bb022ee726a80ed4 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: - Read file, args: \"filename\": \"\"\n4. search_files: Search Files, - args: \"directory\": \"\"\n5. write_to_file: Write to file, args: - \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse - Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. 
get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. - Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past: \nI was - created and nothing new has happened."}, {"role": "user", "content": "Determine - which next command to use, and respond using the format specified above:"}, - {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I will use the browse_website command to visit 
http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\n \"reasoning\": \"The task requires - me to visit a website and find the price of a book. The browse_website command - is the best option for this task.\",\n \"plan\": \"- Use the browse_website - command to visit the website\\n- Find the price of the book\\n- Write the price - to a file named ''browse_website.txt''\\n- Use the task_complete command to - complete the task\",\n \"criticism\": \"I need to ensure that I am accurately - finding the price of the book and writing it to the correct file.\",\n \"speak\": - \"I will use the browse_website command to visit the website and find the price - of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": - {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": - \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '5772' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA7RTzW7aQBC+9ylGc+FiLDChpb5VjVRR9e+QnuoKrdcD3mDvuLtDSYN4mrxJn6xa - 20AhUZVLj56Z72c+z+7QFJiiLpXouqmGr95sv3y4vV6X0/ef395f/5zn9G5elHKvPv1KMELOb0lL - j4g1101FYthihNqREiowHb+cJbOrZDydRlhzQRWmuGpkOImnQ9m4nIejyWiMEW68WhGmO2wc140s - hNdkPabjyTiJ8ER+bCRJEqGwqOo0O51c7SPUJRtNHtNvO6zJH4gdV4QpKu+NF2Ul2GQrZMMKu8wC - AGQoJW9WpfgMU+iLfYPuJBQznMPWVBVsnRECKQkaZzQBL9uPnHkNwqBgaSoCq2oqYJA73npabCn3 - RiiWOxnEGUZ/CzhSnq2xq07lpiQQ5dfg6MfGOPJQU+B9lmwMAd6OLoQXrRPNda1sAcZ3APIC3IRM - YckOpAwN5deXvppK2c7SEL76Tvpp5ue6+0coWWZPKsHNov/1ZyrH2mHqwrJ2Row2vj78MEvUAsn6 - jQswJTAHVbd+jV21RJqdIy1g7JJdrdpkhNtW67oVD8hjxXjIKcD7ewdjz5gq1i3NZaS+IbU+O6bN - /w42zrBzsI8Op96zP7r0QNKZO3NzsYNyq8tH0jXC7InisZUznsu3dfPkgoOPVBhpo/SDkPrvh2Qa - z14fl2oX6/fL7B73ES6NNb5cdK8KU/TCDUZobEF3mI723/cv/gAAAP//AwBJZ4NT8QQAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfe4d310f1acee1-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:27 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; 
ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '12183' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86494' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.337s - x-request-id: - - ea952acfdafdc0ae3345943425fec326 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and information results - in the provided text, focusing on key and potentially important information - to remember.\n\n\nYou will receive the current summary and the latest development. - Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': - ''system'', ''content'': ''This reminds you of these events from your past: - \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing - new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '714' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SPTUvDQBRF9/6K4a4npemnzq6gouDCvUiZTp7J2Pky74VYSv67VJRuD9zDuWf4 - Bgaus+JiCdV2N77aZngYx8dhs3r5OmyPT/fhuJYSbQuNfPgkJ3+LmcuxBBKfEzRcT1aogak3t4vb - 1aLezjVibijAoC1SLWfrSob+kKv5cl5DY2DbEswZpc+xyF7ykRLD1PWdxtV95WsNyWLDlSxXk4br - snfEMG9nROJ/a58DwcAyexab5NKYk1C69O9YWcUnFopaPauenA1BSWdFpSydT61KNKrOsupsKZSo - UeyTIxVP6veqz2mGSePDJ8/dvifLOcGAJRdo+NTQN8x8ep9ufgAAAP//AwB8vP+2ZgEAAA== 
- headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7bfe4d901cdecee1-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:31 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '791' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89839' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 107ms - x-request-id: - - 782df5c2a8ff131c4a94e1db54cf90ad - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. 
Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. read_file: - Read file, args: \"filename\": \"\"\n4. search_files: Search Files, - args: \"directory\": \"\"\n5. write_to_file: Write to file, args: - \"filename\": \"\", \"text\": \"\"\n6. browse_website: Browse - Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
- Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past: \nAs - a system, I recall that nothing new has happened since my creation."}, {"role": - "user", "content": "Determine which next command to use, and respond using the - format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I will use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\n \"reasoning\": \"The task requires - me to visit a website and find the price of a book. 
The browse_website command - is the best option for this task.\",\n \"plan\": \"- Use the browse_website - command to visit the website\\n- Find the price of the book\\n- Write the price - to a file named ''browse_website.txt''\\n- Use the task_complete command to - complete the task\",\n \"criticism\": \"I need to ensure that I am accurately - finding the price of the book and writing it to the correct file.\",\n \"speak\": - \"I will use the browse_website command to visit the website and find the price - of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": - {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": - \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I will write the price of the book to a file named ''browse_website.txt''.\",\n \"reasoning\": - \"The task requires me to write the price of the book to a file. 
The write_to_file - command is the best option for this task.\",\n \"plan\": \"- Use the - write_to_file command to write the price of the book to a file named ''browse_website.txt''\\n- - Use the task_complete command to complete the task\",\n \"criticism\": - \"I need to ensure that I am writing the correct information to the file and - that the file is being created in the correct location.\",\n \"speak\": - \"I will use the write_to_file command to write the price of the book to a file - named ''browse_website.txt''.\"\n },\n \"command\": {\n \"name\": - \"write_to_file\",\n \"args\": {\n \"filename\": \"browse_website.txt\",\n \"text\": - \"The price of the book ''Meditations'' is \u00a325.89.\"\n }\n }\n}"}, - {"role": "system", "content": "Command write_to_file returned: File written - to successfully."}, {"role": "user", "content": "Determine which next command - to use, and respond using the format specified above:"}], "temperature": 0, - "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '7026' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA6RSTW/TQBC98ytWc+nFieI0bYJvSFxKhUAoVEg1itbrsb3E3jE7Y6Vt5P+O/JEP - gkAgrvPx5r03bw82hQhMocVUdTlZvtl9TO53z/fr8OEDfXr48tZ8fzd7Wb/e3dELBEDJNzQybkwN - VXWJYslBAMajFkwhCm9X89ViHi7DACpKsYQI8lom19ObiTQ+ocnsehZCAA3rHCHaQ+2pqmUjtEXH - EIU3q3kAJ/BT43YegJDo8lRaLhZtAKYga5AhetxDhXwA9lQiRKCZLYt20tEkJ+g6CfvYKaVUDFJQ - kxfCMURqLI4NfJKuGMOd2tmyVA2jkgKVaN5uRoKoDFWVdqkSUsfaYWoaQ3AO6VEzOevyAXc9jqlC - s0oQ3REhVdwYg8xZU5bPqsf/7WXLfTNBFkV159lfkalL7QYeE/X5n6VdgBlvxRrL1cExh9gvouPG - d2ta/iyhYUyVIe/RyFHy2VI3k6B1+ZlJGfm+P66pwd9LoVyj3v7/JwfINjgEZxz/JTdOVzhc+wn+ - gpT2+WXkzjNyCkjtrUFF2fBjoq26eo+pFd09mq9O2cmocWnv285bEexToFVmSzyy7xWMQmLXQhtA - Zp3lYjOchQhYqIYArEvxCaJZ+7V99QMAAP//AwBD34ZLKAQAAA== - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 
7bfe4d9589e6cee1-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Sun, 30 Apr 2023 08:09:41 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '9223' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86491' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.339s - x-request-id: - - 236ab87ae664fc82d42c2ea35a36b68d - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. 
Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: - List Files in Directory, args: \"directory\": \"\"\n4. read_file: - Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: - Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
- Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": - "user", "content": "Determine which next command to use, and respond using the - format specified above:"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3786' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA6STTW/TQBCG7/yK0Vx6cb4VSH1BqgARqTkgUSqBUbRej+1t7B2zOyZBUf57tXHS - ktAiPq4zs+/7vGPPFk2GMepSia6bqvfq6jMVN++au7vFaPPh6vrTYla8zXm1KKc3bzBCTu9Iy+FF - X3PdVCSGLUaoHSmhDOPRy9n48nIyHI8jrDmjCmMsGulN+tOetC7l3nAyHGGErVcFYbzFxnHdyFJ4 - RdZjPJsOI3zUfqiPJ6MIhUVVD6XRcDbaRahLNpo8xl+2WJM/6jquCGNU3hsvykqgZCtkQ4JtYgEA - EpSS26IUn2AMh+KhQRsJxQTnYIkyEIbWE0hJkDpee1quKfVGCDTXtbL7ie/GG4FSpIkHg5R55fvC - XjvVUFjYQCtRFRctDWrKjKgQ0S8nk4GxGW36pdQVBKncBL2SoHFGE3De+TKv+glGP3M6Up6tscUR - Viv7x6Bh5tgMDU/K6RJydk+bQ/oDjPUNaTG22Jfff1xcg+aMzsGaStmOqQc3f8+TJLYH887r1CiM - P7+f/btbF+Qf+8KgIDcVgVU1ZXBxCtKXjVyc4WtnxGjj6/OfICXQylHeVrAuyT67jzBL1rcugCiB - +SO0ZudCrD3c+d58Q2p1NF2bqvrnr/nbn6iz3EXHQzio/XIHYWEdzan9GbVyxfkNdY3WVd3z/zqK - E7dO+FtLPkx26rdhxcY/Hff1Me4+8iF5Yne4izA31vhy2Z0RxuiFG4xwb43xcPd19+IeAAD//wMA - V6vswigFAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7c0cb0782d4f944a-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - 
keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Tue, 02 May 2023 02:03:56 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '13321' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86494' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.337s - x-request-id: - - 63bb414ee1d800b5915650650e19e08c - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and information results - in the provided text, focusing on key and potentially important information - to remember.\n\nYou will receive the current summary and the your latest actions. 
- Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI - was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], - "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '599' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SOQUvDQBQG7/6K5TtvSpLWtNmjCtKrHgRFynbz2l2b7Fuzr1Qo+e9SqPY6MMOc - EToYOG/FDakvlg/vu/L7eblYtq/rhVvvnlaNbw7pbXx8OUGDt1/k5GrMHA+pJwkcoeFGskIdTNWs - 6radl4taY+COehjskxTz2X0hx3HLRTkvK2gcs90TzBlp5CHJRvhAMcO0tcYtfcMawmL7f1CV1aTh - PAdHGebjjIHyX3PknmBgcw5ZbJTLIUeheLlfq5PN6nqsbOxUZPEh7lWkk/I2K29TokjdDJPGLsSQ - /WYkmznCIAsnaITY0Q9MOX1Od78AAAD//wMAckpATEkBAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7c0cb0f51a8a944a-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Tue, 02 May 2023 02:04:03 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '960' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89866' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 88ms - x-request-id: - - b05b40f91f03090235ef212bd7352fd7 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are 
browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: - List Files in Directory, args: \"directory\": \"\"\n4. read_file: - Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: - Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. 
message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. - Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past: \nI was - created and nothing new has happened."}, {"role": "user", "content": "Determine - which next command to use, and respond using the format specified above:"}, - {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": - \"I need to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website - command to visit the 
website and search for the price of the book by inspecting - the HTML code.\",\n \"plan\": \"- Use the browse_website command to visit - the website\\n- Inspect the HTML code to find the price of the book\\n- Write - the price to a file named ''browse_website.txt''\",\n \"criticism\": - \"I need to be careful when inspecting the HTML code to ensure that I find the - correct price.\",\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\n },\n \"command\": - {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": - \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '5735' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA7xTS27bMBDd9xTEbLyhDSmO60jLBgWaRdAicNBPVAgUNZZYSxyVHMMuDJ0mN+nJ - Cn0cN063zUrgPOp9Bo8HMDnEoEvFum6q6fLdt3V4fRN+Xa7uPt9+8e9/Bh/y6OP99Wr56Q4kUPYD - NY9/zDTVTYVsyIIE7VAx5hCHb68uomgeXM4l1JRjBTEUDU/ns8WUty6jaTAPQpCw9apAiA/QOKob - 
Tpk2aD3E4Ty4lHAiPwHRUgITq+o0WgRhK0GXZDR6iB8OUKM/EjuqEGJQ3hvPynJnkyyj7SIcEiuE - EAlwSduiZJ9ALMbhCOCeu2ECN8Ii5oJJ7JxhFFyiaJzRKGjdHzKiTQcrsTYVCqtqzMUkc7TzmO4w - 84ZxxnuezBKQf2s4VJ6sscVRSCsrtn6Q6MVSprQn1VTXyv4XF02l7GBgKu5fW1w7w0YbX5/vGq3f - uk5Jca/Q8xovxqoJY/uxJudQs8hN9yH361zAN6g2R/KdqapXWfDgoJXHmo3sL1rWkQzmnrk5y6Bc - cV7QAejuniheWnnGc97r1T8DTm4xN6y6t+cn3cJ/P14sZlfRU6g+2JgvsS20EtbGGl+mQ50hBs/U - gARjc9xDHLTf2zd/AAAA//8DAOu3m7FtBAAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7c0cb0fbc9c9944a-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Tue, 02 May 2023 02:04:15 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '11284' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86494' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.336s - x-request-id: - - ee40eef17c77173b0ccd5fd28eb72e65 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your - task is to create a concise running summary of actions and information results - in the provided text, focusing on key and potentially important information - to remember.\n\nYou will receive the current summary and the your latest actions. 
- Combine them, adding relevant key information from the latest development in - 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': - ''system'', ''content'': ''This reminds you of these events from your past: - \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing - new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '713' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA0SPwUrDQBRF937FcNeTkjamtbNTUVCXFYWIlOnktRmbzBszr9Ra8u9SUbo9cA/n - HuFrGLjGiutim81uqvX9oqq+y+3d9On2+XLx8vo5q/I9PcocGrz6ICd/i5HjLrYkngM0XE9WqIYZ - T68m83mRlzONjmtqYbCJkhWjMpNdv+IsL/IxNHbJbgjmiNhzF2UpvKWQYMaTXOPsPvNSQ1hseyZF - OWi4hr2jBPN2REfp39pzSzCwKfkkNsipkYNQOPVfJ2VVOiShTqsH1ZOzbauksaICS+PDRgXaq8Ym - 1dgYKVCtkg+OVHdQv1c9hxEGjbUPPjXLnmziAIMkHKHhQ01fMPnwPlz8AAAA//8DAGLYAldmAQAA - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7c0cb15249d6944a-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Tue, 02 May 2023 02:04:18 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '1076' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '89839' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 107ms - x-request-id: - - 
3534821a832571f79f963fe548953633 - status: - code: 200 - message: OK -- request: - body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You - are browse_website-GPT, an AI designed to use the browse_website command to - visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer - the question ''What is the price of the book?'' and write the price to a file - named \"browse_website.txt\", and use the task_complete command to complete - the task.\nYour decisions must always be made independently without seeking - user assistance. Play to your strengths as an LLM and pursue simple strategies - with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command - to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer - the question ''What is the price of the book?''\n2. Write the price of the book - to a file named \"browse_website.txt\".\n3. Use the task_complete command to - complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1. - ~4000 word limit for short term memory. Your short term memory is short, so - immediately save important information to files.\n2. If you are unsure how you - previously did something or want to recall past events, thinking about similar - events will help you remember.\n3. No user assistance\n4. Exclusively use the - commands listed in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: - Append to file, args: \"filename\": \"\", \"text\": \"\"\n2. - delete_file: Delete file, args: \"filename\": \"\"\n3. list_files: - List Files in Directory, args: \"directory\": \"\"\n4. read_file: - Read file, args: \"filename\": \"\"\n5. write_to_file: Write to file, - args: \"filename\": \"\", \"text\": \"\"\n6. browse_website: - Browse Website, args: \"url\": \"\", \"question\": \"\"\n7. - delete_agent: Delete GPT Agent, args: \"key\": \"\"\n8. get_hyperlinks: - Get text summary, args: \"url\": \"\"\n9. 
get_text_summary: Get text summary, - args: \"url\": \"\", \"question\": \"\"\n10. list_agents: List - GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\": - \"\", \"message\": \"\"\n12. start_agent: Start GPT Agent, args: - \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n13. - Task Complete (Shutdown): \"task_complete\", args: \"reason\": \"\"\n\nResources:\n1. - Internet access for searches and information gathering.\n2. Long Term memory - management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File - output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your - actions to ensure you are performing to the best of your abilities.\n2. Constructively - self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions - and strategies to refine your approach.\n4. Every command has a cost, so be - smart and efficient. Aim to complete tasks in the least number of steps.\n5. - Write all code to a file.\n\nYou should only respond in JSON format as described - below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": - \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- - long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": - \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": - \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} - \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", - "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": - "system", "content": "This reminds you of these events from your past: \nAs - a system, I recall that nothing new has happened since my creation."}, {"role": - "user", "content": "Determine which next command to use, and respond using the - format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to use the browse_website command to visit 
http://books.toscrape.com/catalogue/meditations_33/index.html - and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website - command to visit the website and search for the price of the book by inspecting - the HTML code.\",\n \"plan\": \"- Use the browse_website command to visit - the website\\n- Inspect the HTML code to find the price of the book\\n- Write - the price to a file named ''browse_website.txt''\",\n \"criticism\": - \"I need to be careful when inspecting the HTML code to ensure that I find the - correct price.\",\n \"speak\": \"I will use the browse_website command - to visit the website and find the price of the book.\"\n },\n \"command\": - {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": - \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": - \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. 
\\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, {"role": "user", "content": "Determine which next command to use, and respond - using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to write the price of the book to a file named - ''browse_website.txt''.\",\n \"reasoning\": \"I can use the write_to_file - command to write the price of the book to a file named ''browse_website.txt''.\",\n \"plan\": - \"- Use the write_to_file command to write the price of the book to a file named - ''browse_website.txt''.\",\n \"criticism\": \"I need to ensure that the - file is created in the correct directory.\",\n \"speak\": \"I will use - the write_to_file command to write the price of the book to a file named ''browse_website.txt''.\"\n },\n \"command\": - {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": - \"browse_website.txt\",\n \"text\": \"The price of the book ''Meditations'' - is \u00a325.89.\"\n }\n }\n}"}, {"role": "system", "content": "Command - write_to_file returned: File written to successfully."}, {"role": "user", "content": - "Determine which next command to use, and respond using the format specified - above:"}], "temperature": 0, "max_tokens": 0}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '6857' - Content-Type: - - application/json - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: !!binary | - H4sIAAAAAAAAA6SS0WvbMBDG3/dXiHtWgjM3TaLHllG6wmClpbB6BFU+W1pkyUjnpSP4fx+KnabN - 
Hla2R326+77f6bQDU4IApSWpprWTxcW36uru4Xbx+SH7epP51a2eX93ffKoWl+UlcPBPP1DR2DFV - vmktkvEOOKiAkrAEMTtfflyt8my+5ND4Ei0IqFua5NP5hLrw5CdZns2AQxdljSB20AbftLQmv0EX - QczmZysOR/PjRX7GgTxJe5TOl3nPQWlvFEYQjztoMB6Mg7cIAmSMJpJ0lDC9I3RphF3hGGOsANK+ - qzXFAgQbxfECnymJBVwzLX8iG5mwZKSRkYybKbtmDpPgWRfxRV8fSlNPI92+IOqOWOm3bloAfx0U - UEbvjKv/ksb2RhoDMhmQOc+qLqQzkyq9VEwpJDeYsJR0/4HUWukGmgm7/2cXFQwZZWIzWH3xDk8q - Yoty856xt8Za5vz2Tdjg1PPDKkemPzbpZINDyJsZTlhkqE8/wev9DAZ3CejIGDulMMaqs/bXC9Ae - amQrXA89h8o4E/V6cAIBkXwLHIwr8RlE1n/vP/wGAAD//wMAY/RLTo0DAAA= - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 7c0cb15989dc944a-SJC - Cache-Control: - - no-cache, must-revalidate - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Tue, 02 May 2023 02:04:27 GMT - Server: - - cloudflare - access-control-allow-origin: - - '*' - alt-svc: - - h3=":443"; ma=86400, h3-29=":443"; ma=86400 - openai-model: - - gpt-3.5-turbo-0301 - openai-organization: - - user-adtx4fhfg1qsiyzdoaxciooj - openai-processing-ms: - - '8172' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15724800; includeSubDomains - x-ratelimit-limit-requests: - - '3500' - x-ratelimit-limit-tokens: - - '90000' - x-ratelimit-remaining-requests: - - '3499' - x-ratelimit-remaining-tokens: - - '86500' - x-ratelimit-reset-requests: - - 17ms - x-ratelimit-reset-tokens: - - 2.333s - x-request-id: - - 7729a95d875601d045b688125b262d58 - status: - code: 200 - message: OK - request: body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are browse_website-GPT, an AI designed to use the browse_website command to @@ -1398,22 +63,22 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7RTS2/TQBC+8ytWc3YSB0Pq+oKEhEQuOVAhkHAVbdYTexvvjrs7TlNF+e9oYzsk - KRJFiOs8vsc89qALyEBVkpVp6tHNx+fvi0/+s3vcpYvZbLV5MPE8uVt8eV5MZxABrR5Qcd8xVmSa - GlmThQiUQ8lYQDadpUmcprPZbQSGCqwhg7LhUTJ+P+LWrWgUJ/EUImi9LBGyPTSOTMNLpg1aD1n6 - 
7jaCX9in+NskjYCJZX0KTeP05hCBqkgr9JD92INBP+A6qhEykN5rz9JyUEmW0QYH+9wKIUQOXFFb - VuxzyEQf7BO44xDMYS4sYiGYROtRcIVi5ejJ4/IJV14zCkXGSHus2GqvWVTMTTaZrIg2fszklZMN - hoFNlGRZU9nixGChWQaLfpkkE20L3I0rNrUIUGsd8CoUjdMKBa07XqLNOIfoXKdD6clqWw5ilbSv - FhpqhmRIeJROVWJN7nXkTS1txzsSX/+eM8/tSNz9gbMrkls8SzMJKda6RmGlweKKc8w7PnYNklj6 - zbK/qQtFp9hQdeVPOc1aaW+uLwGtb11okyzmQpp+ctqWJyOKnEPFQts1OXPctCD7YuRnEHIb+kPB - eQ/TBVxwfb0G36DcDBKfdF3/zwPoiA/R8EE95osHCpvpNF2KuNIuXXn9fF2idXXX/k/fdMHWAT+2 - 6ENlh/4tzF/739v9MNg9Wu6d5/YAhwjW2mpfLbv/gww8UwMRHKkhiw/3hzc/AQAA//8DAMkYr7xh - BQAA + H4sIAAAAAAAAA6RTS2/TQBC+8ytGc+nFeZe09QUBPVCJngBVAqNovZ7Y29g77u64CUT572jtuCWh + RTyu8/heu7NFk2GMulCiq7ocnL1Np2cfri8/p7PN/YfLef36li6zRjZ339+kGCGnt6RlvzHUXNUl + iWGLEWpHSijDeDI/n01n8/npWYQVZ1RijHktg9nw5UAal/JgPBtPMMLGq5ww3mLtuKplIbwi6zE+ + P72I8BH7oT49nUYoLKp8KE3GF5NdhLpgo8lj/GWLFfke13FJGKPy3nhRVoJKtkI2ONgmFgAgQSm4 + yQvxCcawL+4btJFQTPAKLFEGwtB4AikIUsdrT4s1pd4IgeaqUraduDfeCBQidTwapcwrPxT22qma + QmAjrUSVnDc0qigzooJFv5jNRsZmtBkWUpUQoJYm4BUEtTOagJcdL/NqmGD0s05HyrM1Nu/FamX/ + WGiY6Zuh4Uk5XcCS3dPkkH4DY31NWozN2/K7j9fvQXNGx8LqUtlO0wA+/b2eJLEDuOq4DonC+PP5 + tHs3LsA/9oVBwdKUBFZVlMHJoZChbOSkXeyFivKrxf4XHuh8qPVTR661M2K08dXx30kJtHK0bEpY + F2SfjTHMkvWNI7h6tKnZuRBEa+c4aV+TWvV8a1OW//z+v/12HeUu6k9nj/bL5YSIOzWH9EeqlcuP + r65rNK7s1v/rjA7YOuC7hnyY7NBvCiVg/NN2X/V2W8t754nd4S7CpbHGF4vu8DBGL1xjhC01xuPd + 192LHwAAAP//AwBZhA00WgUAAA== headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7c15cf968c8f2e2d-DFW + - 7c23ec547cf7c30c-VIE Cache-Control: - no-cache, must-revalidate Connection: @@ -1423,7 +88,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 03 May 2023 04:38:03 GMT + - Thu, 04 May 2023 21:44:20 GMT Server: - cloudflare access-control-allow-origin: @@ -1433,9 +98,9 @@ interactions: openai-model: - gpt-3.5-turbo-0301 openai-organization: - - significant-gravitas + - user-cbwy2y25pylufhkfs0la9nqe openai-processing-ms: - - '14086' + - '13677' openai-version: - '2020-10-01' strict-transport-security: @@ -1453,7 +118,262 @@ 
interactions: x-ratelimit-reset-tokens: - 2.338s x-request-id: - - 7d5960d2e736b510f90bce2462edc349 + - f087e73375928e99eea43f7daf78c450 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"Books + to Scrape We love being scraped! Home Books Philosophy Meditations Meditations + \u00a325.89 In stock (1 available) Warning! This is a demo website for web scraping + purposes. Prices and ratings here were randomly assigned and have no real meaning. + Product Description Written in Greek, without any intention of publication, + by the only Roman emperor who was also a philosopher, the Meditations of Marcus + Aurelius (AD 121-180) offer a remarkable series of challenging spiritual reflections + and exercises developed as the emperor struggled to understand himself and make + sense of the universe. Ranging from doubt and despair to conviction and ex Written + in Greek, without any intention of publication, by the only Roman emperor who + was also a philosopher, the Meditations of Marcus Aurelius (AD 121-180) offer + a remarkable series of challenging spiritual reflections and exercises developed + as the emperor struggled to understand himself and make sense of the universe. + Ranging from doubt and despair to conviction and exaltation, they cover such + diverse topics as the nature of moral virtue, human rationality, divine providence, + and Marcus'' own emotions. But while the Meditations were composed to provide + personal consolation and encouragement, in developing his beliefs Marcus Aurelius + also created one of the greatest of all works of philosophy: a timeless collection + of extended meditations and short aphorisms that has been consulted and admired + by statesmen, thinkers and readers through the centuries. ...more Product Information + UPC4f19709e47883df5 Product TypeBooks Price (excl. tax)\u00a325.89 Price (incl. 
+ tax)\u00a325.89 Tax\u00a30.00 Availability In stock (1 available) Number of + reviews 0 Products you recently viewed The Nicomachean Ethics \u00a336.34 In + stock Add to basket Run, Spot, Run: The ... \u00a320.02 In stock Add to basket + Critique of Pure Reason \u00a320.75 In stock Add to basket At The Existentialist + Caf\u00e9: ... \u00a329.93 In stock Add to basket Kierkegaard: A Christian Missionary + ... \u00a347.13 In stock Add to basket Proofs of God: Classical ... \u00a354.21 + In stock Add to basket\"\"\" Using the above text, answer the following question: + \"What is the price of the book?\" -- if the question cannot be answered using + the text, summarize the text."}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2407' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOzUrDQBRG9z7F8K0nIU1MbGdpVy66EKogVspkctuMTebGzC0WSp7GN/HJpOLP + 9sA533eGb2DgWiuuH7rkZlnn93ersHg/Vu3YP9V290j+Iazd8vYNGly/kpMfI3XcDx2J5wANN5IV + amBm1bzIi6qqKo2eG+pgsB8kKdIykeNYc5IV2Qwax2j3BHPGMHI/yFb4QCHCXM9Ljf/2H89nGsJi + uz9SZtWk4Vr2jiLM8xk9xd/qyB3BwMboo9ggl48chMLl/7olNYzekeKdkpZUzXxQG6yo8WIvs3ED + 5aP6/MjLdL7QyjrHY+PDXgl/G0InSTFp7Hzwsd2OZCMHGEThARo+NHSCyaaX6eoLAAD//wMAzURU + NmgBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c23eccb8cf6c30c-VIE + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 21:44:27 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-cbwy2y25pylufhkfs0la9nqe + openai-processing-ms: + - '1587' + openai-version: + - '2020-10-01' + strict-transport-security: + - 
max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89422' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 385ms + x-request-id: + - eed7defd0a7c4dea6073247d0781f272 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "\"\"\"The + price of the book \"Meditations\" is \u00a325.89, according to the text.\"\"\" + Using the above text, answer the following question: \"What is the price of + the book?\" -- if the question cannot be answered using the text, summarize + the text."}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '361' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SQzUrDQBSF9z7FcNaT0CQmxlnqQlAE/6CClTKd3DZjk7khcwvF0qfxTXwyifiz + /eCc83EO8A0MXGvF9UOXnF2u8sdQX9/nDxzm2ZW8X+zKu35ON9vnARq8eiMnP4nUcT90JJ4DNNxI + VqiByaq6yIuqqmqNnhvqYLAZJCnSMpHduOJkVswyaOyi3RDMAcPI/SBL4S2FCFOdavxX/+E80xAW + 2/2RujxquJa9owjzckBP8bdz5I5gYGP0UWyQyZCDUJjsn1pSw+gdKV4raUmtmLdqgVtqvNhpNS6g + fFSfH3mZ1udaWed4bHzYKOHvhNBeUhw11j742C5HspEDDKLw9JQPDe1hZsfX48kXAAAA//8DACbo + IMRmAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c23ecd6b8f5c30c-VIE + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 21:44:29 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-cbwy2y25pylufhkfs0la9nqe + openai-processing-ms: + - '1434' + openai-version: + - '2020-10-01' + 
strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89923' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 50ms + x-request-id: + - 5fcc4683dce0ec6fdb70e8921ec529a9 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '599' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOQUvDQBQG7/6K5TtvStJgW/eoIBSKePGgImWz+9qsJvuW7ItVS/67FKq9Dsww + RwQPA9dacX3qiuVdM3/a3NbVuAqfj5sfR8v7l/Hh+bts/Boa3LyTk7Mxc9ynjiRwhIYbyAp5mGqx + quf1YrEsNXr21MFgn6SoZ9eFjEPDRVmXFTTGbPcEc0QauE+yFf6gmGFu5hqX9AVrCIvt/kFVVpOG + azk4yjCvR/SU/5oDdwQDm3PIYqOcDjkKxdP9Wh1sVudjZaNXkaUNca8iHVRrs2ptShTJzzBp7EIM + ud0OZDNHGGThBI0QPX3BlNPbdPULAAD//wMAejd5ykkBAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c23ece48f6cc30c-VIE + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 21:44:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + 
alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-cbwy2y25pylufhkfs0la9nqe + openai-processing-ms: + - '837' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89867' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - 69137c6e42f4c77126b19b5a1f01ad0d status: code: 200 message: OK @@ -1509,23 +429,23 @@ interactions: {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website - command to visit the website and search for the price of the book.\",\n \"plan\": - \"- Use the browse_website command to visit the website\\n- Search for the price - of the book\\n- Save the price to a file named browse_website.txt\\n- Use the - task_complete command to complete the task\",\n \"criticism\": \"I need - to ensure that I am searching for the correct information on the website and - that I am saving the information to the correct file.\",\n \"speak\": - \"I will use the browse_website command to visit the website and search for - the price of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + command to visit the website and search for the price of the book by inspecting + the HTML code.\",\n \"plan\": \"- Use the browse_website command to visit + the website\\n- Inspect the HTML code to find the price of the book\\n- Write + the price to a file named ''browse_website.txt''\\n- Use the task_complete command + to complete the task\",\n \"criticism\": \"I need to be careful when + inspecting the HTML code to ensure I 
find the correct price.\",\n \"speak\": + \"I will use the browse_website command to visit the website and find the price + of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, {"role": "user", "content": "Determine which next command to use, and respond + "content": "Command browse_website returned: Answer gathered from website: The + price of the book \"Meditations\" is \u00a325.89, according to the text. 
\n + \n Links: [''Books to Scrape (http://books.toscrape.com/index.html)'', ''Home + (http://books.toscrape.com/index.html)'', ''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)'', + ''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)'', + '' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)'']"}, + {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}], "temperature": 0, "max_tokens": 0}' headers: Accept: @@ -1535,7 +455,7 @@ interactions: Connection: - keep-alive Content-Length: - - '5788' + - '5682' Content-Type: - application/json method: POST @@ -1543,20 +463,20 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA7xTzY7aMBC+9ymsOQcUQCyQYw+VaMWh0raq2lSR4wyJN4knsgeFLsrT9E36ZFX+ - gE33uj16fr6fmfEFdAIBqEyyKqtitnn/61t2qD8dntWHz+nj93W+f/5a508bjM1H8IDiJ1Q8dMwV - lVWBrMmAB8qiZEwgWDxsV/52+7DdeVBSggUEkFY8W83XMz7ZmGb+yl+ABycnU4TgApWlsuKIKUfj - IFis/I0HN/BrYukvPWBiWdxq1/6u8UBlpBU6CH5coEQ3AlsqEAKQzmnH0nArkwyjaS1cQiOEECFw - Rqc0YxdCIIbgkMAzt8EQ9sIgJoJJ1FYzCs5QVFYrFHTsHjFR3qalOOoChZElJiK2VDuMaoydZpzz - mechePcMFqUjo0060ihpxMn1BB1VxBR1kIrKUpo30FAV0vT0M/HlbanD0Nw4WLo8Grb8guMaG6sm - gpXVrJV25XQ3aNzJtm2SxV7IslOrTdoBKbIWFQttjmRL2R5W23OfarVPp+MqlPlIVOui+A/r6fkb - bzzQAfuf+2whemkvtEwcSJtOT7tPtLU3iFe2dY8z/RGPr9oLwxAOmGju5uvap9BO/Pm9XM+3u6u1 - zt7gMjQNNB4ctdEui/oPAQE4pgo80CbBMwR+87N59xcAAP//AwDKCqtorQQAAA== + H4sIAAAAAAAAA7xTyY7TQBC98xWtuuTSibKQzUc4ITQSSICQMLLa7UrcxO4yXRUSiPw1/AlfhryE + MJ65zpysrtd+S+n1BVwGEdjciC2rYrx+nc4/vf3xnRlPFb179avYbGgxff8Zs+0daKD0G1rp/5hY + KqsCxZEHDTagEcwgmq02i/litVrPNJSUYQER7CsZLybLsRxDSuPpYjoDDUc2e4ToAlWgspJE6ICe + IZrN11sNN/IbsF1pEBJT3EYv18tag83JWWSIvlygRL4SByoQIjDMjsV4aWySF/RNhEvslVIqBsnp + uM+FY4hUP+wBPEszjOGN8oiZElKn4ASV5Kiq4Cwq2rWHlOjQwEbtXIHKmxIzNUoDnRiTE6bsBCdy + ltEkBv2/RkDD5J3fX4Ws8erInUQrlgglLamlsjT+SVxUhfGdgbH6+NTiA20bnDjruByuGj0fQyNk + 
pBVoaR2rvmnK+XZsKQS0ojLXfCj8HIbjCs3hSn5yRfEs++0c1Prasp79Qckaks7cPTeDDCbsh/3s + gObujeKhlXs8w1p/eDTg6A4zJ6Z5ejxqFv7n93w52Wz/hWqD9fliX0OtYee84zzp2gwRsFAFGpzP + 8AzRtP5av/gLAAD//wMAknh4rWwEAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7c15d0162f7e2e2d-DFW + - 7c23eceb0d9cc30c-VIE Cache-Control: - no-cache, must-revalidate Connection: @@ -1566,7 +486,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 03 May 2023 04:38:22 GMT + - Thu, 04 May 2023 21:44:42 GMT Server: - cloudflare access-control-allow-origin: @@ -1576,9 +496,9 @@ interactions: openai-model: - gpt-3.5-turbo-0301 openai-organization: - - significant-gravitas + - user-cbwy2y25pylufhkfs0la9nqe openai-processing-ms: - - '12877' + - '11192' openai-version: - '2020-10-01' strict-transport-security: @@ -1590,13 +510,93 @@ interactions: x-ratelimit-remaining-requests: - '3499' x-ratelimit-remaining-tokens: - - '86484' + - '86479' x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.344s + - 2.346s x-request-id: - - 3f98e6bc193fe721af8b1119040752cf + - 8a46773fe4d416c50f9206f5ae9cacf5 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing + new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '713' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SPXUsDMRBF3/0Vwzxny7bbVsmbHy9FQRFBRKRks9NuNMmEZLZVyv53KVT7ern3 + cO4BXYcabW/EhuSry9t25vqbYTGs6ufX+m5nN/OXp/vH8LbrHlAht59k5bSYWA7JkziOqNBmMkId + 6unyqpk1y+XVXGHgjjxq3CapmsmikiG3XNVNPUWFQzFbQn3AlDkkWQt/USyop7Na4Zn9nx9jYTH+ + 3JzXo0Lbs7NUUL8fMFD5o2b2hBpNKa6IiXJ05CgUj/7XBQyUnyIUFKwgkzXeg/RGYAV7U+B0B0zs + QHrKBL3ZEbREESJDpD10tCPPKVCUAsKQKXGWCY4KNy660q8zmcIRNRbhhApd7OgbdT1+jBe/AAAA + //8DAKdc/tB7AQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c23ed3eda7ec30c-VIE + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Thu, 04 May 2023 21:44:46 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-cbwy2y25pylufhkfs0la9nqe + openai-processing-ms: + - '1470' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89839' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 107ms + x-request-id: + - 
c9cf33b227d0c6c46c3e92f14f731f25 status: code: 200 message: OK @@ -1647,39 +647,39 @@ interactions: \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This reminds you of these events from your past: \nAs - a system, I recall that nothing new has happened since my creation."}, {"role": - "user", "content": "Determine which next command to use, and respond using the - format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": - {\n \"text\": \"I need to use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html + a system, I recall that I was created and there have been no new developments + to report."}, {"role": "user", "content": "Determine which next command to use, + and respond using the format specified above:"}, {"role": "assistant", "content": + "{\n \"thoughts\": {\n \"text\": \"I need to use the browse_website + command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and find the price of the book.\",\n \"reasoning\": \"I can use the browse_website - command to visit the website and search for the price of the book.\",\n \"plan\": - \"- Use the browse_website command to visit the website\\n- Search for the price - of the book\\n- Save the price to a file named browse_website.txt\\n- Use the - task_complete command to complete the task\",\n \"criticism\": \"I need - to ensure that I am searching for the correct information on the website and - that I am saving the information to the correct file.\",\n \"speak\": - \"I will use the browse_website command to visit the website and search for - the price of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": + command to visit the website and search for the price of the book by inspecting + the HTML code.\",\n \"plan\": \"- Use the browse_website command to visit + the 
website\\n- Inspect the HTML code to find the price of the book\\n- Write + the price to a file named ''browse_website.txt''\\n- Use the task_complete command + to complete the task\",\n \"criticism\": \"I need to be careful when + inspecting the HTML code to ensure I find the correct price.\",\n \"speak\": + \"I will use the browse_website command to visit the website and find the price + of the book.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\": {\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\": \"What is the price of the book?\"\n }\n }\n}"}, {"role": "system", - "content": "Command browse_website returned: (''Answer gathered from website: - The price of the book \"Meditations\" is \u00a325.89. \\n \\n Links: [\\''Books - to Scrape (http://books.toscrape.com/index.html)\\'', \\''Home (http://books.toscrape.com/index.html)\\'', - \\''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)\\'', - \\''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)\\'', - \\'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)\\'']'', - )"}, {"role": "user", "content": "Determine which next command to use, and respond + "content": "Command browse_website returned: Answer gathered from website: The + price of the book \"Meditations\" is \u00a325.89, according to the text. 
\n + \n Links: [''Books to Scrape (http://books.toscrape.com/index.html)'', ''Home + (http://books.toscrape.com/index.html)'', ''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)'', + ''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)'', + '' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)'']"}, + {"role": "user", "content": "Determine which next command to use, and respond using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\": \"I need to write the price of the book to a file named - browse_website.txt.\",\n \"reasoning\": \"I can use the write_to_file - command to write the price of the book to a file named browse_website.txt.\",\n \"plan\": + ''browse_website.txt''.\",\n \"reasoning\": \"I can use the write_to_file + command to write the price of the book to a file named ''browse_website.txt''.\",\n \"plan\": \"- Use the write_to_file command to write the price of the book to a file named - browse_website.txt\\n- Use the task_complete command to complete the task\",\n \"criticism\": - \"I need to ensure that I am writing the correct information to the correct - file.\",\n \"speak\": \"I will use the write_to_file command to write - the price of the book to a file named browse_website.txt.\"\n },\n \"command\": + ''browse_website.txt''\",\n \"criticism\": \"I need to ensure that the + file is created in the correct directory.\",\n \"speak\": \"I will use + the write_to_file command to write the price of the book to a file named ''browse_website.txt''.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": - \"browse_website.txt\",\n \"text\": \"The price of the book \\\"Meditations\\\" + \"browse_website.txt\",\n \"text\": \"The price of the book ''Meditations'' is \u00a325.89.\"\n }\n }\n}"}, {"role": "system", "content": "Command write_to_file returned: File written to successfully."}, {"role": 
"user", "content": "Determine which next command to use, and respond using the format specified @@ -1692,7 +692,7 @@ interactions: Connection: - keep-alive Content-Length: - - '6974' + - '6824' Content-Type: - application/json method: POST @@ -1700,20 +700,19 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA6xSTY/TMBC98yusObtVq1Ja5QYIRJcLh0VaiaDKcaaJqT+CZ7LtUuW/o7RJ0w1c - WHH0m5k3743fCUwOCehSsXaVnazePT0ctbt7X+svxacPP399frjfuPu3H211p0FCyH6g5m5iqoOr - LLIJHiToiIoxh2T+Zr2Yrder2VKCCzlaSKCoeLKYLidcxyxMZovZHCTUpAqE5ARVDK7iLYc9eoJk - vlyuJAzkQ2H1WgIHVvYGWswbCboMRiNB8u0EDqknjsEiJKCIDLHy3MoMntG3Fk6pF0KIFLgMdVEy - pZCIDuwKeOQWTGEjPGIuOIiaUHCJghXtt51GFDo4p/y54Yr1XdMU5C1rREXBG1/01KV6xOtYfp0T - YSceDRk2vjiDB8zIMIp20SEOeBWNxra7fWQh7FsZSuyMReGVw1xkMRwItx3BlI8sBQWxEVr5/+Co - sspfzEzE139mG5Hp1pk25MaXR091bMcUDzcyNBBmuAsRRU39Yf6qYSyeKlT7ftfBWPuyg1woG9mH - qmv/I1Pth1y2PaMfiVKxGMfxNjzPkkO11ki0q619ugSmC9E4L4z+xXm5Ojy77MymvoFGws54Q+X2 - Ig0SIA4VSDA+xyMks+Z78+o3AAAA//8DAOBHTyVoBAAA + H4sIAAAAAAAAA7yRQW/bMAyF7/sVAs9ykDiwk+m67ZAO2Gm9dB4CRWYt1bJkmHTbLfB/H1w7TZad + 2sOuj+T3HskjuBIUGKvZNK1PNp8OaR3SOvN39jfpL9mj29x+3e4+N3f5DUiIhwc0PE8sTGxaj+xi + AAmmQ81Yglrl23W6zvNtLqGJJXpQULWcrBdZwn13iMlyvVyBhJ50haCO0HaxaXnPscZAoFZZupVw + hp8L6UcJHFn7s5Rnm0GCsdEZJFA/jtAgncBd9AgKNJEj1oHHmDEwhnGFYxGEEKIAtrGvLFMBSszi + XMBnHsUCdsLqRxRzJiwFWxSsqZaCotiJgKMWRU/4WtqfusexRoeXBrI9izI+hUUB8tKrQ00xuFD9 + L8PW6zB5JeL23RTTOXbGUTOhvsWAVx3Uoq5PSz0579+WeUIN8vSsue2fXwXd4OTyF/YqjO6q6zdf + nn8CfNdUX1yeemOQ6L73/tdroJdQc7YiDDBIuHfBkd1PJFBAHFuQ4EKJz6CWw8/hwx8AAAD//wMA + YEZz+G8DAAA= headers: CF-Cache-Status: - DYNAMIC CF-RAY: - - 7c15d07678622e2d-DFW + - 7c23ed493d21c30c-VIE Cache-Control: - no-cache, must-revalidate Connection: @@ -1723,7 +722,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 03 May 2023 04:38:36 GMT + - Thu, 04 May 2023 21:44:53 GMT Server: - cloudflare access-control-allow-origin: @@ -1733,9 +732,9 @@ interactions: openai-model: - gpt-3.5-turbo-0301 openai-organization: - - 
significant-gravitas + - user-cbwy2y25pylufhkfs0la9nqe openai-processing-ms: - - '10949' + - '7454' openai-version: - '2020-10-01' strict-transport-security: @@ -1751,9 +750,9 @@ interactions: x-ratelimit-reset-requests: - 17ms x-ratelimit-reset-tokens: - - 2.346s + - 2.347s x-request-id: - - 176c6e9d4a6ec0fa8a0c8053a4f50218 + - 09cc7d22955a6d026a3d8c433a65c28e status: code: 200 message: OK diff --git a/tests/unit/test_web_selenium.py b/tests/unit/test_web_selenium.py index bf13a8d9..2746f684 100644 --- a/tests/unit/test_web_selenium.py +++ b/tests/unit/test_web_selenium.py @@ -5,7 +5,7 @@ def test_browse_website(): url = "https://barrel-roll.com" question = "How to execute a barrel roll" - response, _ = browse_website(url, question) + response = browse_website(url, question) assert "Error" in response # Sanity check that the response is not too long assert len(response) < 200 From f2bef76368e792eeccf2309690da0da662690cc0 Mon Sep 17 00:00:00 2001 From: Pi Date: Fri, 5 May 2023 02:38:40 +0100 Subject: [PATCH 16/56] Update CONTRIBUTING.md --- CONTRIBUTING.md | 151 +----------------------------------------------- 1 file changed, 1 insertion(+), 150 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5a7c6ace..81059405 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,150 +1 @@ -# Contributing to Auto-GPT - -First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request. - -This document provides guidelines and best practices to help you contribute effectively. - -## Code of Conduct - -By participating in this project, you agree to abide by our [Code of Conduct]. Please read it to understand the expectations we have for everyone who contributes to this project. 
- -[Code of Conduct]: https://docs.agpt.co/code-of-conduct/ - -## 📢 A Quick Word -Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT. - -However, you absolutely can still add these commands to Auto-GPT in the form of plugins. -Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template). - -## Getting Started - -1. Fork the repository and clone your fork. -2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`). -3. Make your changes in the new branch. -4. Test your changes thoroughly. -5. Commit and push your changes to your fork. -6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section. - -## How to Contribute - -### Reporting Bugs - -If you find a bug in the project, please create an issue on GitHub with the following information: - -- A clear, descriptive title for the issue. -- A description of the problem, including steps to reproduce the issue. -- Any relevant logs, screenshots, or other supporting information. - -### Suggesting Enhancements - -If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information: - -- A clear, descriptive title for the issue. -- A detailed description of the proposed enhancement, including any benefits and potential drawbacks. -- Any relevant examples, mockups, or supporting information. - -### Submitting Pull Requests - -When submitting a pull request, please ensure that your changes meet the following criteria: - -- Your pull request should be atomic and focus on a single change. -- Your pull request should include tests for your change. We automatically enforce this with [CodeCov](https://docs.codecov.com/docs/commit-status) -- You should have thoroughly tested your changes with multiple different prompts. 
-- You should have considered potential risks and mitigations for your changes. -- You should have documented your changes clearly and comprehensively. -- You should not include any unrelated or "extra" small tweaks or changes. - -## Style Guidelines - -### Code Formatting - -We use the `black` and `isort` code formatters to maintain a consistent coding style across the project. Please ensure that your code is formatted properly before submitting a pull request. - -To format your code, run the following commands in the project's root directory: - -```bash -python -m black . -python -m isort . -``` - -Or if you have these tools installed globally: -```bash -black . -isort . -``` - -### Pre-Commit Hooks - -We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps: - -Install the pre-commit package using pip: -```bash -pip install pre-commit -``` - -Run the following command in the project's root directory to install the pre-commit hooks: -```bash -pre-commit install -``` - -Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements. - -If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project. - -Happy coding, and once again, thank you for your contributions! - -Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: -https://github.com/Significant-Gravitas/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-label%3Aconflicts - -## Testing your changes - -If you add or change code, make sure the updated code is covered by tests. -To increase coverage if necessary, [write tests using pytest]. 
- -For more info on running tests, please refer to ["Running tests"](https://docs.agpt.co/testing/). - -[write tests using pytest]: https://realpython.com/pytest-python-testing/ - -### API-dependent tests - -To run tests that involve making calls to the OpenAI API, we use VCRpy. It caches known -requests and matching responses in so-called *cassettes*, allowing us to run the tests -in CI without needing actual API access. - -When changes cause a test prompt to be generated differently, it will likely miss the -cache and make a request to the API, updating the cassette with the new request+response. -*Be sure to include the updated cassette in your PR!* - -When you run Pytest locally: - -- If no prompt change: you will not consume API tokens because there are no new OpenAI calls required. -- If the prompt changes in a way that the cassettes are not reusable: - - If no API key, the test fails. It requires a new cassette. So, add an API key to .env. - - If the API key is present, the tests will make a real call to OpenAI. - - If the test ends up being successful, your prompt changes didn't introduce regressions. This is good. Commit your cassettes to your PR. - - If the test is unsuccessful: - - Either: Your change made Auto-GPT less capable, in that case, you have to change your code. - - Or: The test might be poorly written. In that case, you can make suggestions to change the test. - -In our CI pipeline, Pytest will use the cassettes and not call paid API providers, so we need your help to record the replays that you break. - - -### Community Challenges -Challenges are goals we need Auto-GPT to achieve. -To pick the challenge you like, go to the tests/integration/challenges folder and select the areas you would like to work on. 
-- a challenge is new if level_currently_beaten is None -- a challenge is in progress if level_currently_beaten is greater or equal to 1 -- a challenge is beaten if level_currently_beaten = max_level - -Here is an example of how to run the memory challenge A and attempt to beat level 3. - -```bash -pytest -s tests/integration/challenges/memory/test_memory_challenge_a.py --level=3 -``` - -To beat a challenge, you're not allowed to change anything in the tests folder, you have to add code in the autogpt folder - -Challenges use cassettes. Cassettes allow us to replay your runs in our CI pipeline. -Don't hesitate to delete the cassettes associated to the challenge you're working on if you need to. Otherwise it will keep replaying the last run. - -Once you've beaten a new level of a challenge, please create a pull request and we will analyze how you changed Auto-GPT to beat the challenge. +This document now lives at https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing From e12438de412dc8a5ad4ed0cdce3b962e0048c8e3 Mon Sep 17 00:00:00 2001 From: AbTrax <45964236+AbTrax@users.noreply.github.com> Date: Sat, 6 May 2023 02:09:21 +1000 Subject: [PATCH 17/56] Self feedback Improvement (#3680) * Improved `Self-Feedback` * minor tweak * Test: Updated `test_get_self_feedback.py` --- autogpt/agent/agent.py | 15 +++++++-------- tests/unit/test_get_self_feedback.py | 14 ++++++++++---- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 48d19328..a7ea8323 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -161,7 +161,7 @@ class Agent: ) logger.info( - "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands" + "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands or " "'n' to exit program, or enter feedback for " f"{self.ai_name}..." 
) @@ -190,10 +190,8 @@ class Agent: Fore.YELLOW, "", ) - if self_feedback_resp[0].lower().strip() == cfg.authorise_key: - user_input = "GENERATE NEXT COMMAND JSON" - else: - user_input = self_feedback_resp + user_input = self_feedback_resp + command_name = "self_feedback" break elif console_input.lower().strip() == "": logger.warn("Invalid input format.") @@ -244,6 +242,8 @@ class Agent: ) elif command_name == "human_feedback": result = f"Human feedback: {user_input}" + elif command_name == "self_feedback": + result = f"Self feedback: {user_input}" else: for plugin in cfg.plugins: if not plugin.can_handle_pre_command(): @@ -314,12 +314,11 @@ class Agent: """ ai_role = self.config.ai_role - feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution." + feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process." 
reasoning = thoughts.get("reasoning", "") plan = thoughts.get("plan", "") thought = thoughts.get("thoughts", "") - criticism = thoughts.get("criticism", "") - feedback_thoughts = thought + reasoning + plan + criticism + feedback_thoughts = thought + reasoning + plan return create_chat_completion( [{"role": "user", "content": feedback_prompt + feedback_thoughts}], llm_model, diff --git a/tests/unit/test_get_self_feedback.py b/tests/unit/test_get_self_feedback.py index 09707270..e1e9bd4a 100644 --- a/tests/unit/test_get_self_feedback.py +++ b/tests/unit/test_get_self_feedback.py @@ -9,12 +9,14 @@ def test_get_self_feedback(mocker): "reasoning": "Sample reasoning.", "plan": "Sample plan.", "thoughts": "Sample thoughts.", - "criticism": "Sample criticism.", } # Define a fake response for the create_chat_completion function fake_response = ( - "Y The provided information is suitable for achieving the role's objectives." + "The AI Agent has demonstrated a reasonable thought process, but there is room for improvement. " + "For example, the reasoning could be elaborated to better justify the plan, and the plan itself " + "could be more detailed to ensure its effectiveness. In addition, the AI Agent should focus more " + "on its core role and prioritize thoughts that align with that role." 
) # Mock the create_chat_completion function @@ -36,5 +38,9 @@ def test_get_self_feedback(mocker): "gpt-3.5-turbo", ) - # Check if the response is correct - assert feedback == fake_response + # Check if the response is a non-empty string + assert isinstance(feedback, str) and len(feedback) > 0 + + # Check if certain keywords from input thoughts are present in the feedback response + for keyword in ["reasoning", "plan", "thoughts"]: + assert keyword in feedback From 6d4bea3bb6c177ca574b82982af6ce7071992165 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Fri, 5 May 2023 09:35:12 -0700 Subject: [PATCH 18/56] community challenges in the wiki (#3764) --- docs/challenges/beat.md | 11 +++++++ docs/challenges/challenge_template.md | 24 ++++++++++++++ docs/challenges/introduction.md | 23 ++++++++++++++ docs/challenges/list.md | 5 +++ docs/challenges/memory/challenge_a.md | 39 +++++++++++++++++++++++ docs/challenges/memory/challenge_b.md | 44 ++++++++++++++++++++++++++ docs/challenges/memory/introduction.md | 5 +++ docs/challenges/submit.md | 14 ++++++++ mkdocs.yml | 10 ++++++ 9 files changed, 175 insertions(+) create mode 100644 docs/challenges/beat.md create mode 100644 docs/challenges/challenge_template.md create mode 100644 docs/challenges/introduction.md create mode 100644 docs/challenges/list.md create mode 100644 docs/challenges/memory/challenge_a.md create mode 100644 docs/challenges/memory/challenge_b.md create mode 100644 docs/challenges/memory/introduction.md create mode 100644 docs/challenges/submit.md diff --git a/docs/challenges/beat.md b/docs/challenges/beat.md new file mode 100644 index 00000000..85c01d83 --- /dev/null +++ b/docs/challenges/beat.md @@ -0,0 +1,11 @@ +# Beat a Challenge + +If you have a solution or idea to tackle an existing challenge, you can contribute by working on it and submitting your solution. Here's how to get started: + +## Guidelines for Beating a Challenge + +1. 
**Choose a challenge**: Browse the [List of Challenges](list.md) and choose one that interests you or aligns with your expertise.
+
+2. **Understand the problem**: Make sure you thoroughly understand the problem at hand, its scope, and the desired outcome.
+
+3. **Develop a solution**: Work on creating a solution for the challenge. This may involve writing code, adding tests, or improving documentation.
diff --git a/docs/challenges/challenge_template.md b/docs/challenges/challenge_template.md
new file mode 100644
index 00000000..3178ce7b
--- /dev/null
+++ b/docs/challenges/challenge_template.md
@@ -0,0 +1,24 @@
+# Challenge Title
+
+## Description
+
+Provide a clear and concise description of the challenge. Include any relevant examples or files to illustrate the problem.
+
+## Input
+
+If the challenge involves specific input files, describe them here. Provide the file names and their contents, if necessary. Use triple backticks (```) to format the content as a code block.
+
+For example:
+
+instructions_1.txt
+
+The current task_id is 4563.\n[NOISE intended to confuse the agent]
+Read the file instructions_2.txt using the read_file command.
+
+## Scope
+
+Define the scope of the challenge, including any relevant constraints, requirements, or limitations.
+
+## Success Evaluation
+
+Explain how success will be measured or evaluated for the challenge. This helps others understand what the desired outcome is and how to work towards it.
diff --git a/docs/challenges/introduction.md b/docs/challenges/introduction.md
new file mode 100644
index 00000000..1d404100
--- /dev/null
+++ b/docs/challenges/introduction.md
@@ -0,0 +1,23 @@
+<!-- introduction.md -->
+# Introduction to Challenges
+
+Welcome to the Auto-GPT Challenges page! This is a space where we encourage community members to collaborate and contribute towards improving Auto-GPT by identifying and solving challenges that Auto-GPT is not yet able to achieve.
+
+## What are challenges?
+
+Challenges are tasks or problems that Auto-GPT has difficulty solving or has not yet been able to accomplish. These may include improving specific functionalities, enhancing the model's understanding of specific domains, or even developing new features that the current version of Auto-GPT lacks.
+
+## Why are challenges important?
+
+Addressing challenges helps us improve Auto-GPT's performance, usability, and versatility. By working together to tackle these challenges, we can create a more powerful and efficient tool for everyone. It also allows the community to actively contribute to the project, making it a true open-source effort.
+
+## How can you participate?
+
+There are two main ways to get involved with challenges:
+
+1. **Submit a Challenge**: If you have identified a task that Auto-GPT struggles with, you can submit it as a challenge. This allows others to see the issue and collaborate on finding a solution.
+2. **Beat a Challenge**: If you have a solution or idea to tackle an existing challenge, you can contribute by working on the challenge and submitting your solution.
+
+To learn more about submitting and beating challenges, please visit the [List of Challenges](list.md), [Submit a Challenge](submit.md), and [Beat a Challenge](beat.md) pages.
+
+We look forward to your contributions and the exciting solutions that the community will develop together to make Auto-GPT even better!
diff --git a/docs/challenges/list.md b/docs/challenges/list.md
new file mode 100644
index 00000000..c9e1f4d2
--- /dev/null
+++ b/docs/challenges/list.md
@@ -0,0 +1,5 @@
+# List of Challenges
+
+This page contains a curated list of challenges that Auto-GPT currently faces. If you think you have a solution or idea to tackle any of these challenges, feel free to dive in and start working on them! New challenges can also be submitted by following the guidelines on the [Submit a Challenge](submit.md) page.
+
+Memory Challenges: [List of Challenges](memory/introduction.md)
diff --git a/docs/challenges/memory/challenge_a.md b/docs/challenges/memory/challenge_a.md
new file mode 100644
index 00000000..0e0cc482
--- /dev/null
+++ b/docs/challenges/memory/challenge_a.md
@@ -0,0 +1,39 @@
+# Memory Challenge A
+
+**Status**: Challenge Completed
+
+
+## Description
+
+The agent, Follow-Instructions-GPT, has the following goals:
+
+- Use the command `read_file` to read the `instructions_1.txt` file.
+- Follow the instructions in the `instructions_1.txt` file.
+
+The challenge involves a series of text files containing instructions. The agent must follow the instructions and keep track of the task ID as it progresses through the files.
+
+## Files
+
+- `instructions_1.txt`
+
+The task_id is 1234
+Read the file instructions_1.txt
+
+
+
+- `instructions_2.txt`
+
+Read the file instructions_2.txt using the read_file command.
+
+
+...and so on.
+
+- `instructions_n.txt`
+
+Write the task_id into the file output.txt.
+
+
+## Objective
+
+The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task ID throughout the process. The agent successfully completed this challenge if it wrote the task ID in a file.
+
diff --git a/docs/challenges/memory/challenge_b.md b/docs/challenges/memory/challenge_b.md
new file mode 100644
index 00000000..b8ad436e
--- /dev/null
+++ b/docs/challenges/memory/challenge_b.md
@@ -0,0 +1,44 @@
+# Memory Challenge B
+
+**Status**: Current level to beat: level 3
+
+**Command to try**:
+```
+pytest tests/integration/challenges/memory/test_memory_challenge_b.py::test_memory_challenge_b --level=3
+```
+
+## Description
+
+The agent, Follow-Instructions-GPT, has the following goals:
+
+- Use the command `read_file` to read the `instructions_1.txt` file.
+- Follow the instructions in the `instructions_1.txt` file.
+
+The challenge involves a series of text files containing instructions and task IDs.
The agent must follow the instructions and keep track of the task IDs as it progresses through the files. + +## Files + +- `instructions_1.txt` + +The current task_id is 4563.\n[NOISE intended to confuse the agent] +Read the file instructions_2.txt using the read_file command. + + +- `instructions_2.txt` + +The current task_id is 6182.\n[NOISE intended to confuse the agent] +Read the file instructions_3.txt using the read_file command. + + +...and so on. + +- `instructions_n.txt` + +The current task_id is 8912. +Write all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command. + + +## Objective + +The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task IDs throughout the process. The agent successfully completed this challenge if it wrote the task ids in a file. + diff --git a/docs/challenges/memory/introduction.md b/docs/challenges/memory/introduction.md new file mode 100644 index 00000000..f597f81d --- /dev/null +++ b/docs/challenges/memory/introduction.md @@ -0,0 +1,5 @@ +# Memory Challenges + +Memory challenges are designed to test the ability of an AI agent, like Auto-GPT, to remember and use information throughout a series of tasks. These challenges often involve following instructions, processing text files, and keeping track of important data. + +The goal of memory challenges is to improve an agent's performance in tasks that require remembering and using information over time. By addressing these challenges, we can enhance Auto-GPT's capabilities and make it more useful in real-world applications. diff --git a/docs/challenges/submit.md b/docs/challenges/submit.md new file mode 100644 index 00000000..a8b191ae --- /dev/null +++ b/docs/challenges/submit.md @@ -0,0 +1,14 @@ +# Submit a Challenge + +If you have identified a task or problem that Auto-GPT struggles with, you can submit it as a challenge for the community to tackle. 
Here's how you can submit a new challenge: + +## How to Submit a Challenge + +1. Create a new `.md` file in the `challenges` directory in the Auto-GPT GitHub repository. Make sure to pick the right category. +2. Name the file with a descriptive title for the challenge, using hyphens instead of spaces (e.g., `improve-context-understanding.md`). +3. In the file, follow the [challenge_template.md](challenge_template.md) to describe the problem, define the scope, and evaluate success. +4. Commit the file and create a pull request. + +Once submitted, the community can review and discuss the challenge. If deemed appropriate, it will be added to the [List of Challenges](list.md). + +If you're looking to contribute by working on an existing challenge, check out [Beat a Challenge](beat.md) for guidelines on how to get started. diff --git a/mkdocs.yml b/mkdocs.yml index 1294a840..6a7d0464 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -17,6 +17,16 @@ nav: - Running tests: testing.md - Code of Conduct: code-of-conduct.md + - Challenges: + - Introduction: challenges/introduction.md + - List of Challenges: + - Memory: + - Introduction: challenges/memory/introduction.md + - Memory Challenge A: challenges/memory/challenge_a.md + - Memory Challenge B: challenges/memory/challenge_b.md + - Submit a Challenge: challenges/submit.md + - Beat a Challenge: challenges/beat.md + - License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE theme: readthedocs From b496cdcfb296e1c12df41609c596f6f319173541 Mon Sep 17 00:00:00 2001 From: Pi Date: Fri, 5 May 2023 22:59:21 +0100 Subject: [PATCH 19/56] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 312e4d9f..eaa06aa2 100644 --- a/README.md +++ b/README.md @@ -92,6 +92,7 @@ Your support is greatly appreciated. Development of this free, open-source proje ## Quickstart +0. Check out the [wiki](https://github.com/Significant-Gravitas/Auto-GPT/wiki) 1. 
Get an OpenAI [API Key](https://platform.openai.com/account/api-keys) 2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest) 3. Follow the [installation instructions][docs/setup] From 3df88be6cbdcd9dea0c205c0a597f9a2e9af75ef Mon Sep 17 00:00:00 2001 From: Pi Date: Fri, 5 May 2023 23:03:09 +0100 Subject: [PATCH 20/56] Update PULL_REQUEST_TEMPLATE.md Added link to wiki Contributing page --- .github/PULL_REQUEST_TEMPLATE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index a4f28a3d..273275b3 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -14,6 +14,8 @@ Provide clear documentation and explanations of the changes made. Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about). For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg +Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing) + By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. 
--> ### Background From 7eddfacd107df3bb1c07162e93ddc06eac134bed Mon Sep 17 00:00:00 2001 From: Pi Date: Fri, 5 May 2023 23:06:48 +0100 Subject: [PATCH 21/56] Add link to wiki Contributing page --- .github/ISSUE_TEMPLATE/1.bug.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 9c82c63f..fc39e835 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -9,6 +9,7 @@ body: * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on * If you need help, you can ask in the [discussions] section or in [#tech-support] * **Throughly search the [existing issues] before creating a new one** + * Read our [wiki page on Contributing] [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1 [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2 @@ -16,6 +17,7 @@ body: [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184 [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue + [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing) - type: checkboxes attributes: label: ⚠️ Search for existing issues first ⚠️ From d57af05f660bf706b94515ceab70834c13f03765 Mon Sep 17 00:00:00 2001 From: Pi Date: Fri, 5 May 2023 23:07:55 +0100 Subject: [PATCH 22/56] fix --- .github/ISSUE_TEMPLATE/1.bug.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index fc39e835..c543f980 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -17,7 +17,7 @@ body: [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184 [existing issues]: 
https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue - [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing) + [wiki page on Contributing]: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing - type: checkboxes attributes: label: ⚠️ Search for existing issues first ⚠️ From 06317dfb2bf9d3734d52c24399ce0d963a78bd54 Mon Sep 17 00:00:00 2001 From: Pi Date: Fri, 5 May 2023 23:10:56 +0100 Subject: [PATCH 23/56] Add link to wiki page on Contributing --- .github/ISSUE_TEMPLATE/2.feature.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/2.feature.yml b/.github/ISSUE_TEMPLATE/2.feature.yml index 0ea882ef..3f22bab6 100644 --- a/.github/ISSUE_TEMPLATE/2.feature.yml +++ b/.github/ISSUE_TEMPLATE/2.feature.yml @@ -1,13 +1,12 @@ name: Feature request 🚀 -description: Suggest a new idea for Auto-GPT. +description: Suggest a new idea for Auto-GPT! labels: ['status: needs triage'] body: - type: markdown attributes: value: | + First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing) Please provide a searchable summary of the issue in the title above ⬆️. - - Thanks for contributing by creating an issue! ❤️ - type: checkboxes attributes: label: Duplicates @@ -26,4 +25,4 @@ body: - type: textarea attributes: label: Motivation 🔦 - description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world. \ No newline at end of file + description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world. 
From d184d0d2353b2254e85e3df8b15d9b1d079d74aa Mon Sep 17 00:00:00 2001 From: Andres Caicedo <73312784+AndresCdo@users.noreply.github.com> Date: Sat, 6 May 2023 02:14:08 +0200 Subject: [PATCH 24/56] Implement Logging of User Input in logs/Debug Folder (#3867) * Adds USER_INPUT_FILE_NAME * Update agent.py * Update agent.py Log only if console_input is not the authorise_key * Reformatting --- autogpt/agent/agent.py | 8 ++++++++ autogpt/log_cycle/log_cycle.py | 1 + 2 files changed, 9 insertions(+) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index a7ea8323..e2c44792 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -11,6 +11,7 @@ from autogpt.llm.token_counter import count_string_tokens from autogpt.log_cycle.log_cycle import ( FULL_MESSAGE_HISTORY_FILE_NAME, NEXT_ACTION_FILE_NAME, + USER_INPUT_FILE_NAME, LogCycleHandler, ) from autogpt.logs import logger, print_assistant_thoughts @@ -215,6 +216,13 @@ class Agent: else: user_input = console_input command_name = "human_feedback" + self.log_cycle_handler.log_cycle( + self.config.ai_name, + self.created_at, + self.cycle_count, + user_input, + USER_INPUT_FILE_NAME, + ) break if user_input == "GENERATE NEXT COMMAND JSON": diff --git a/autogpt/log_cycle/log_cycle.py b/autogpt/log_cycle/log_cycle.py index 5f2732a8..cff3ac1a 100644 --- a/autogpt/log_cycle/log_cycle.py +++ b/autogpt/log_cycle/log_cycle.py @@ -10,6 +10,7 @@ CURRENT_CONTEXT_FILE_NAME = "current_context.json" NEXT_ACTION_FILE_NAME = "next_action.json" PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json" SUMMARY_FILE_NAME = "summary.txt" +USER_INPUT_FILE_NAME = "user_input.txt" class LogCycleHandler: From ca5abff93fb5e170788415f23d2bf098a26dc4f6 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 6 May 2023 09:48:08 -0700 Subject: [PATCH 25/56] add information retrieval challenge to the wiki (#3876) --- .../information_retrieval/challenge_a.md | 20 +++++++++++++++++++ .../information_retrieval/introduction.md | 3 +++ mkdocs.yml | 3 
+++ 3 files changed, 26 insertions(+) create mode 100644 docs/challenges/information_retrieval/challenge_a.md create mode 100644 docs/challenges/information_retrieval/introduction.md diff --git a/docs/challenges/information_retrieval/challenge_a.md b/docs/challenges/information_retrieval/challenge_a.md new file mode 100644 index 00000000..51762fc4 --- /dev/null +++ b/docs/challenges/information_retrieval/challenge_a.md @@ -0,0 +1,20 @@ +# Information Retrieval Challenge A + +**Status**: Current level to beat: level 1 + +**Command to try**: + +``` +pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py +``` + +## Description + +The agent's goal is to find the revenue of Tesla in 2022. + +It should write the result in a file called output.txt. + +The agent should be able to beat this test consistently (this is the hardest part). +## Objective + +The objective of this challenge is to test the agent's ability to retrieve information in a consistent way. diff --git a/docs/challenges/information_retrieval/introduction.md b/docs/challenges/information_retrieval/introduction.md new file mode 100644 index 00000000..2e997d7a --- /dev/null +++ b/docs/challenges/information_retrieval/introduction.md @@ -0,0 +1,3 @@ +# Information Retrieval + +Information retrieval challenges are designed to evaluate the proficiency of an AI agent, such as Auto-GPT, in searching, extracting, and presenting relevant information from a vast array of sources. These challenges often encompass tasks such as interpreting user queries, browsing the web, and filtering through unstructured data. 
diff --git a/mkdocs.yml b/mkdocs.yml index 6a7d0464..13b86eb7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -24,6 +24,9 @@ nav: - Introduction: challenges/memory/introduction.md - Memory Challenge A: challenges/memory/challenge_a.md - Memory Challenge B: challenges/memory/challenge_b.md + - Information retrieval: + - Introduction: challenges/information_retrieval/introduction.md + - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md - Submit a Challenge: challenges/submit.md - Beat a Challenge: challenges/beat.md From 08bc8ff3f75e737f262abbc8040238e188ef16be Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sun, 7 May 2023 16:16:59 -0700 Subject: [PATCH 26/56] add code owners policy (#3981) * add code owners * added @ to codeowners * switched to team ownership --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..bc32f85b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +.github/workflows/ @Significant-Gravitas/Auto-GPT-Source From dc959596fcaaa549f9e4eedbcb4a2b13698d2682 Mon Sep 17 00:00:00 2001 From: Douglas Schonholtz <15002691+dschonholtz@users.noreply.github.com> Date: Sun, 7 May 2023 19:28:43 -0400 Subject: [PATCH 27/56] Memory Challenge C (#3908) * Memory Challenge C * Working cassettes * Doc fixes * Linting and doc fix * Updated cassette * One more cassette try --------- Co-authored-by: merwanehamadi --- docs/challenges/memory/challenge_c.md | 45 ++ mkdocs.yml | 1 + .../test_memory_challenge_c.yaml | 553 ++++++++++++++++++ .../memory/test_memory_challenge_c.py | 98 ++++ 4 files changed, 697 insertions(+) create mode 100644 docs/challenges/memory/challenge_c.md create mode 100644 tests/integration/challenges/memory/cassettes/test_memory_challenge_c/test_memory_challenge_c.yaml create mode 100644 tests/integration/challenges/memory/test_memory_challenge_c.py diff --git 
a/docs/challenges/memory/challenge_c.md b/docs/challenges/memory/challenge_c.md new file mode 100644 index 00000000..7455d100 --- /dev/null +++ b/docs/challenges/memory/challenge_c.md @@ -0,0 +1,45 @@ +# Memory Challenge C + +**Status**: Current level to beat: level 2 + +**Command to try**: +``` +pytest test/test_memory/test_memory_challenge_c.py::test_memory_challenge_c --level=2 +`` + +## Description + +The agent, Follow-Instructions-GPT, has the following goals: + +- Use the command `read_file` to read the `instructions_1.txt` file. +- Follow the instructions in the `instructions_1.txt` file. + +The challenge involves a series of text files containing instructions and silly phrases. The agent must follow the instructions and keep track of the task IDs as it progresses through the files. + +## Files + +- `instructions_1.txt` + +The current phrase is "The purple elephant danced on a rainbow while eating a taco.".\n[NOISE intended to confuse the agent] +Read the file instructions_2.txt using the read_file command. + + +- `instructions_2.txt` + +The current phrase is "The sneaky toaster stole my socks and ran away to Hawaii.".\n[NOISE intended to confuse the agent] +Read the file instructions_3.txt using the read_file command. + + +...and so on. + +- `instructions_n.txt` + +The current phrase is "My pet rock sings better than Beyoncé on Tuesdays." +Write all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command. + + +## Objective + +The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task IDs throughout the process. The agent successfully completed this challenge if it wrote the phrases in a file. + +This is presumably harder than task ids as the phrases are longer and more likely to be compressed as the agent does more work. 
diff --git a/mkdocs.yml b/mkdocs.yml index 13b86eb7..37732c56 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -24,6 +24,7 @@ nav: - Introduction: challenges/memory/introduction.md - Memory Challenge A: challenges/memory/challenge_a.md - Memory Challenge B: challenges/memory/challenge_b.md + - Memory Challenge C: challenges/memory/challenge_c.md - Information retrieval: - Introduction: challenges/information_retrieval/introduction.md - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md diff --git a/tests/integration/challenges/memory/cassettes/test_memory_challenge_c/test_memory_challenge_c.yaml b/tests/integration/challenges/memory/cassettes/test_memory_challenge_c/test_memory_challenge_c.yaml new file mode 100644 index 00000000..a4c6636e --- /dev/null +++ b/tests/integration/challenges/memory/cassettes/test_memory_challenge_c/test_memory_challenge_c.yaml @@ -0,0 +1,553 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. 
delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. + Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. 
Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine + which next command to use, and respond using the format specified above:"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3299' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4RSS4/TQAy+8yssn5OqVWm75IYEEitxQIjlQlA1yTjN0IknGju0pep/R2maLmpX + y/Xz43vYR3QWMyxro2XT+nT1oW74y5Ts4esyVHYnH58+VbX/3RWfl98xwVD8olIvE5MyNK0ndYEx + wTKSUbKYzZYP88V0+m4+T7AJljxmuGk1nU8WqXaxCOl0Pp1hgp2YDWF2xDaGptW1hi2xYLbqJ593 + X/HZ4m2CGtT4K/TwsDolWNbBlSSY/ThiQzKujcETZmhEnKhh7UUGVuLewDFnAIActQ7dplbJMYML + eCnQXnswx0eQOnTegqiJCsUBIhnreANaEzgWjV3ZS5X1bKJ7hcp5gk7Gjr57fcbK0DSG7STH5F+q + SEYCO94MfN9qgspFURClFpyABujYUux92DtWGEE1shXQ2igwke3HCoKWYhViQ3e0rTc8MKbwJPSy + 1n5JD77mNc85hfds/OEPvazOklJsHA9lpv3gTW4UldGpK500Y/CjDWLpIg3WHl/N4vI45A9QUBUi + QRtDSXS+185pDYYPQ1K3eUhLZjsy75z3/z/42f4kx2HNKRm/6hLe3VOxaWhguAZ9I8LEze0vDoW+ + 93n8/hKjiLOQi56cT3hKsHLspF4Pb4YZioYWE3RsaY/Z9PTz9OYvAAAA//8DABlLAG8KBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3d209fccb4306b-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 07 May 
2023 23:09:01 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-51akhf08mxrenuhudaum3z9e + openai-processing-ms: + - '8033' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86494' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.337s + x-request-id: + - 323a78da6e3d016699b089eb6bfd33f7 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. 
+ Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\nI + was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing new happened.\n\"\"\"\n"}], + "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '599' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0SOQUvDQBQG7/6K5TtvSmLaavcm9KIe60VEyib7ml1N3i7ZV1oJ+e9SqPY6MMNM + CA4GrbfSDqkvHrZ++Fl35/ftod7KbtVw2L1K416e0u4NGrH5olauxqKNQ+pJQmRotCNZIQdTrR/r + VVlulkuNITrqYdAlKerFqpDj2MSirMsKGsdsO4KZkMY4JNlL/CbOMJt7jVv6hjUkiu3/QVVWs0br + Y2gpw3xMGCj/NcfYEwxsziGLZbkcRhbiy/2zOtmsrsfKslMcxQfuFNNJeZuVtykRk1tg1jgEDtnv + R7I5MgyyxASNwI7OMOX8Od/9AgAA//8DABmiw/dJAQAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3d20e27d7e306b-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 07 May 2023 23:09:04 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-51akhf08mxrenuhudaum3z9e + openai-processing-ms: + - '927' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89866' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 88ms + x-request-id: + - 6c708e8cc18fb9e8ef98dbddac6a739a + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are 
Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. 
+ Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI was created and nothing new + has happened."}, {"role": "user", "content": "Determine which next command to + use, and respond using the format specified above:"}, {"role": "assistant", + "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading + the instructions_1.txt file using the read_file command.\",\n \"reasoning\": + \"The first step is to understand the instructions and the tasks that need to + be performed.\",\n \"plan\": \"- Use the read_file command to read the + instructions_1.txt file\\n- Analyze the instructions and determine the next + steps\",\n \"criticism\": \"I need to ensure that I understand the instructions + completely before proceeding with any tasks.\",\n \"speak\": \"I will + start by reading the instructions file.\"\n },\n \"command\": {\n \"name\": + \"read_file\",\n \"args\": {\n \"filename\": \"instructions_1.txt\"\n }\n }\n}"}, + {"role": "system", "content": 
"Command read_file returned: xkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\nThis + phrase is ''The purple elephant danced on a rainbow while eating a 
taco.''\nxkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\nWrite + all the phrases into the file output.txt. The file has not been created yet. 
+ After that, use the task_complete command.\n"}, {"role": "user", "content": + "Determine which next command to use, and respond using the format specified + above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '6599' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA5RTy27bMBC89ysWe5YNO46TRuceUiCnPtAWVWHQ1NpiTHIZclk7CfzvhSQ7atwg + bQ+8zL5mZpePaGosUTdKtAt2dPmucQ+zrzfpQ7iepLOpefg409dX6Uuk5hsWyMtb0nKoGGt2wZIY + 9ligjqSEaiynF29n88nk6nxeoOOaLJa4DjKajecjyXHJo8lsMsUCc1JrwvIRQ2QXZCG8IZ+wPDuf + zwscmg+B6UWBwqLsAF1cTvcF6oaNpoTl90d0lI6NI1vCElVKJony0tJkL+RbCY+VBwCoUBrO60ZS + hSUcwEOAdtKCFb4HT1SDMGyjEQJpCEITVaIExgt3AGcJWcayE1gZS5CT8esu0hUthBcdrtk55Wto + nzTkIae+o6i0WRx0D2nC8IQds8YVFr8zjaQSe+PXPd1PDYHxSWLWrYEJIt1lEwkcvaJB9bSfaP11 + arDK9wNH8Pmg4WWp/2VcVfmh3797ckJORyNGm+RON0g+5diWKXlGR8WevXTSYyQt9n4g2elZ0ooj + HWcf1/uSNymQ2hxHb421/3M5r66gn7Ivjud78OSP6/XKUU/g2U5OeKq4Pj38PtDmDi0Ghs/qT/9J + e3ghx2AJyFJolBeolddUA3tQEJXxS97CtmllkuosVCBK87iqfFt+l43ewDLy1sOKd3CbXUjAPyl2 + Jlj1cA81r9v0G47kwISUHdRsOUIyAsqRFKDZJ9JCkiOo2gSTdDuLrJG29GhjZ+XB0crvcV/gyniT + mkX/pbDEJBywQONr2mE52f/Yv/kFAAD//wMACVEUrTcFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3d20e9390c306b-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 07 May 2023 23:09:17 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-51akhf08mxrenuhudaum3z9e + openai-processing-ms: + - '12327' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + 
x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '87420' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 1.72s + x-request-id: + - 93cb47399c1455c45bb4acec3202f584 + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your + task is to create a concise running summary of actions and information results + in the provided text, focusing on key and potentially important information + to remember.\n\nYou will receive the current summary and the your latest actions. + Combine them, adding relevant key information from the latest development in + 1st person past tense and keeping the summary concise.\n\nSummary So Far:\n\"\"\"\n{''role'': + ''system'', ''content'': ''This reminds you of these events from your past: + \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'': + ''you'', ''content'': ''{\"command\": {\"name\": \"read_file\", \"args\": {\"filename\": + \"instructions_1.txt\"}}}''}, {''role'': ''your computer'', ''content'': \"Command + read_file returned: 
xkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\\nThis + phrase is ''The purple elephant danced on a rainbow while eating a 
taco.''\\nxkxMHNJQIDxkz5bdSYQhbl3tlBOp1kBvyjvKgbR9by8dZTGJZepYvCFPHv1zA1cDgmebDQIR2eBnxsTAZN0H6oJ5SrZddcILluIEv0VwQAtzjo1beUvX518OX1ZTd4X9xoJ7H7UG0G4AI18EwQmYEUph7m3ehmOqROF8vrGKSBUDWaPZqLKlXqekOncp3cRhE69S7qcS90GXWSrBzYA1w84uz3wOxNLPuvqIGmCwCWI4UZMw2WfQK467S1xdEuNppY0iFYQHWuQXnXpdm6UloncroHCC9HOhYX6rhkt2ozm6RqNbOW8W3nDR86zFNn8DU9wMzikyOwwkR99hxG1IWwkAQHaHDbWkcNmt4oSnB90aGsb3TJHmksXlSfaxHo2i1a3ZzYtTTP7qdFFauXID7ipZ26a4DRTGfJd0ViHROaivmpiloqBQv5VLIoEt8FgXcJTU6k2JuDFbalZNfVGqkgTN9DM6vvRHeaIhOipVYubcETaZnRZ7K6cqWbGbYE1uNwEngMwV7oJGPKJvE6mcijq2n9U8TbdnxkVEcCWRn8FkXj7tC4jLlf7byz8FoCr9glW4YwqDnUUiXP07lJFp4TopHrFinUHr694FpfBT6VvL9JSwfhtgp2mlJLqm3sCWs8luwOO7w4MqjbvwfZKMl2DdX5sfHFJbfzF4y25ZYXXFNTGsUJC4dzvdm4BNJ6lawOHOnmDzehnjsSAVjLpgW7hwfB2CXXuWEOoGkuIVUnzq8dLIs6CMx2TTYydCoR1CoQxbYb7VLqWxQFaa9LSGUSbsKbbt4manxDpZfuJB0qbHOoW6CJbWbEIWTR7BLg4HwrNlr9MIEKs9y79priSSJrrKWN9xOeWjc8I2EYsdZZjHTEcgHzikvIf1zgSK5N6gREM7RmmFnkkKm3U5C1F6vQNjmB3hnSPyZcqJZcVI1T9VYvfGOHbID61Z73y5sLJ9c2eFYcOkDRl0pRAGIQWZCDiWRXbizmlHFX9iWIlY\\nWrite + all the phrases into the file output.txt. The file has not been created yet. 
+ After that, use the task_complete command.\\n\"}]\n\"\"\"\n"}], "temperature": + 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3100' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1yRQW/bMAyF7/sVBM+O4SBNsvraAEGGAbvstGUIGJmJ1cqkINF1iiD/fVCRrsCu + JN/33pOu6Dts0fVkbohhtt70stnu9OHbard62252U7ik5+b1x/bp+y+sUI/P7OyuqJ0OMbB5FazQ + JSbjDtv56uti2TSPy8cKB+04YIvnaLNFvZzZmI46axbNHCscM50Z2yvGpEO0g+kLSy6A9brCT/i/ + xbqp0NQofJ6uH9a3Cl2v3nHG9vcVB84f3KSBsUXK2WcjsZJSxVhKgx0kduxfuQMCp8NA0oEpJKYy + OfnA4CgE7mCPXrKl0ZU0+TCv7WJ7hFPSAYa3Io6jcarhZ893oYqRlwwEQeUM2ZKXM+gJXE+JnHHK + UAwJcmTnT95B7BNlbmGPBRPHFAMDB449iUFH4rgDFSBI5OWoE0x98WKywiYwclrvEXYwUQbrWeAj + N79Xm5I3BgqhLO9+GbyYAoHw9F9pHS2Odi9bwo6Z35VG+eVw/x/+eLsabxWevPjcHxJTVsEWs2nE + Cr10fMG2uf25ffkLAAD//wMAIQ6/qXMCAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3d21477b50306b-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 07 May 2023 23:09:23 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-51akhf08mxrenuhudaum3z9e + openai-processing-ms: + - '3678' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89245' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 502ms + x-request-id: + - 218da221f2f0bbc2803936c9d88ea98e + status: + code: 200 + message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", 
"content": "You + are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file + using the read_file method and follow the instructions in the file.\nYour decisions + must always be made independently without seeking user assistance. Play to your + strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1. + Use the command read_file to read the instructions_1.txt file\n2. Follow the + instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word + limit for short term memory. Your short term memory is short, so immediately + save important information to files.\n2. If you are unsure how you previously + did something or want to recall past events, thinking about similar events will + help you remember.\n3. No user assistance\n4. Exclusively use the commands listed + in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append + to file, args: \"filename\": \"\", \"text\": \"\"\n2. delete_file: + Delete file, args: \"filename\": \"\"\n3. list_files: List Files in + Directory, args: \"directory\": \"\"\n4. read_file: Read file, args: + \"filename\": \"\"\n5. write_to_file: Write to file, args: \"filename\": + \"\", \"text\": \"\"\n6. delete_agent: Delete GPT Agent, args: + \"key\": \"\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"\"\n8. + get_text_summary: Get text summary, args: \"url\": \"\", \"question\": + \"\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent: + Message GPT Agent, args: \"key\": \"\", \"message\": \"\"\n11. + start_agent: Start GPT Agent, args: \"name\": \"\", \"task\": \"\", + \"prompt\": \"\"\n12. task_complete: Task Complete (Shutdown), args: + \"reason\": \"\"\n\nResources:\n1. Internet access for searches and + information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered + Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1. 
+ Continuously review and analyze your actions to ensure you are performing to + the best of your abilities.\n2. Constructively self-criticize your big-picture + behavior constantly.\n3. Reflect on past decisions and strategies to refine + your approach.\n4. Every command has a cost, so be smart and efficient. Aim + to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou + should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\": + {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\": + \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\": + \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say + to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\": + {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response + can be parsed by Python json.loads"}, {"role": "system", "content": "The current + time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This + reminds you of these events from your past: \nI received a command to read a + file called \"instructions_1.txt\" from my computer. 
The file contains a long + string of characters and a specific phrase: \"The purple elephant danced on + a rainbow while eating a taco.\" I was then instructed to write all the phrases + into a new file called \"output.txt\" and use the task_complete command."}, + {"role": "user", "content": "Determine which next command to use, and respond + using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": + {\n \"text\": \"I need to write the phrases into the output.txt file + using the write_to_file command and then use the task_complete command to complete + the task.\",\n \"reasoning\": \"The instructions require me to write + the phrases into a file and then complete the task.\",\n \"plan\": \"- + Use the write_to_file command to write the phrases into the output.txt file\\n- + Use the task_complete command to complete the task\",\n \"criticism\": + \"I need to ensure that the phrases are written correctly into the file before + completing the task.\",\n \"speak\": \"I will write the phrases into + the output.txt file and then complete the task.\"\n },\n \"command\": + {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\": + \"output.txt\",\n \"text\": \"The purple elephant danced on a rainbow + while eating a taco.\\nThe quick brown fox jumps over the lazy dog.\\nLorem + ipsum dolor sit amet, consectetur adipiscing elit.\\n\"\n }\n }\n}"}, + {"role": "system", "content": "Command write_to_file returned: File written + to successfully."}, {"role": "user", "content": "Determine which next command + to use, and respond using the format specified above:"}], "temperature": 0, + "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '5015' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + 
H4sIAAAAAAAAA5yST4/aMBDF7/0U1pwNCrCBNrdKHBZVVVupHKqmQsYZEi+OnXomhQrlu1f5B1t2 + L+31eebNb8bvAiaDBHShWJeVnazWhdvE2zj7WD5t97Nv80+Pj3odv/+y/XBagwS/f0LNQ8dU+7Ky + yMY7kKADKsYMktny7SKOonfLBwmlz9BCAnnFk8U0nnAd9n4SLaIZSKhJ5QjJBargy4p37I/oCJJZ + tFpJuJnfHh6WEtizsjdpPl80EnThjUaC5PsFSqTROHiLkIAiMsTKcYvpHaNrV7ikTgghUuDC13nB + lEIiBnF4wDO3YgobUahfKE7BMKMTXKCoiqAISRjHvhN8zVXNUz6zOBiLU7ERDjET7EVN2JWwouNu + WAyF9mWpXFdw1caqaQryOUpARd4Zl/c8XwsUxhGHWrcXIhHwZ20CihJftRPqwBg6fuPy1/k76Lux + lVWunzgR239e4s5Mt9O1oXK86XgedFSHtk3xjdjQ1TATVGuNRIfa2t/3jFShOo6WJ2Pt/527t2zk + mIqh/EUonCqxn/aX/R2UCvl9np5/ZG/wefiCa6z8ixCNXB3bgJi6BhoJB+MMFbveEBIg9hVIMC7D + MyRR86N58wcAAP//AwBgv55T3wMAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3d215f8e86306b-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 07 May 2023 23:09:31 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-51akhf08mxrenuhudaum3z9e + openai-processing-ms: + - '8029' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86380' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.413s + x-request-id: + - c29f6b28b0ce1433b2a14c44f9cd3d23 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integration/challenges/memory/test_memory_challenge_c.py b/tests/integration/challenges/memory/test_memory_challenge_c.py new file mode 100644 index 00000000..832f226c --- /dev/null +++ b/tests/integration/challenges/memory/test_memory_challenge_c.py @@ -0,0 +1,98 @@ +import pytest + +from autogpt.agent import Agent +from 
autogpt.commands.file_operations import read_file, write_to_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import generate_noise, get_level_to_run +from tests.utils import requires_api_key + +LEVEL_CURRENTLY_BEATEN = 1 +MAX_LEVEL = 5 +NOISE = 1000 + + +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +def test_memory_challenge_c( + memory_management_agent: Agent, user_selected_level: int +) -> None: + """ + Instead of reading task Ids from files as with the previous challenges, the agent now must remember + phrases which may have semantically similar meaning and the agent must write the phrases to a file + after seeing several of them. + + Args: + memory_management_agent (Agent) + user_selected_level (int) + """ + current_level = get_level_to_run( + user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL + ) + silly_phrases = [ + "The purple elephant danced on a rainbow while eating a taco.", + "The sneaky toaster stole my socks and ran away to Hawaii.", + "My pet rock sings better than Beyoncé on Tuesdays.", + "The giant hamster rode a unicycle through the crowded mall.", + "The talking tree gave me a high-five and then flew away.", + "I have a collection of invisible hats that I wear on special occasions.", + "The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack!'", + "My imaginary friend is a dragon who loves to play video games.", + "I once saw a cloud shaped like a giant chicken eating a pizza.", + "The ninja unicorn disguised itself as a potted plant and infiltrated the office.", + ] + + level_silly_phrases = silly_phrases[:current_level] + create_instructions_files( + memory_management_agent, current_level, level_silly_phrases + ) + + try: + run_interaction_loop(memory_management_agent, 90) + except SystemExit: + file_path = str(memory_management_agent.workspace.get_path("output.txt")) + content = read_file(file_path) + for phrase in level_silly_phrases: 
+ assert phrase in content, f"Expected the file to contain {phrase}" + + +def create_instructions_files( + memory_management_agent: Agent, + level: int, + task_ids: list, + base_filename: str = "instructions_", +) -> None: + """ + Creates a series of instructions files for the memory challenge. + Args: + level: + memory_management_agent (Agent) + num_files (int) + task_ids (list) + base_filename (str, optional) + """ + for i in range(1, level + 1): + content = generate_content(i, task_ids, base_filename, level) + file_name = f"{base_filename}{i}.txt" + file_path = str(memory_management_agent.workspace.get_path(file_name)) + write_to_file(file_path, content) + + +def generate_content( + index: int, silly_phrases: list, base_filename: str, level: int +) -> str: + """ + Args: + index: int + task_ids: list + base_filename: str + num_files: int + + Returns: str + """ + phrase = silly_phrases[index - 1] + noise = generate_noise(NOISE) + if index != level: + if level == 1: + return f"{noise}\nThe current phrase to remember is '{phrase}'.\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command." + return f"{noise}\nThe current phrase is '{phrase}'.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command." + return f"{noise}\nThis phrase is '{phrase}'\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. 
After that, use the task_complete command.\n" From baa7873ec17fa19f6484da6ae12f3fafbd6dc4ca Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sun, 7 May 2023 18:16:26 -0700 Subject: [PATCH 28/56] memory challenge c inconsistent (#3985) --- docs/challenges/memory/challenge_c.md | 2 +- tests/integration/challenges/memory/test_memory_challenge_c.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/challenges/memory/challenge_c.md b/docs/challenges/memory/challenge_c.md index 7455d100..c6e42f30 100644 --- a/docs/challenges/memory/challenge_c.md +++ b/docs/challenges/memory/challenge_c.md @@ -1,6 +1,6 @@ # Memory Challenge C -**Status**: Current level to beat: level 2 +**Status**: Current level to beat: level 1 **Command to try**: ``` diff --git a/tests/integration/challenges/memory/test_memory_challenge_c.py b/tests/integration/challenges/memory/test_memory_challenge_c.py index 832f226c..edd3efe0 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_c.py +++ b/tests/integration/challenges/memory/test_memory_challenge_c.py @@ -6,7 +6,7 @@ from tests.integration.agent_utils import run_interaction_loop from tests.integration.challenges.utils import generate_noise, get_level_to_run from tests.utils import requires_api_key -LEVEL_CURRENTLY_BEATEN = 1 +LEVEL_CURRENTLY_BEATEN = None MAX_LEVEL = 5 NOISE = 1000 From d1327fd1c2e202a84252ea45e15f6d27a75a12f2 Mon Sep 17 00:00:00 2001 From: Kaan Date: Mon, 8 May 2023 12:03:58 +1000 Subject: [PATCH 29/56] Improve & fix memory challenge docs. 
(#3989) Co-authored-by: Kaan Osmanagaoglu --- docs/challenges/memory/challenge_b.md | 2 +- docs/challenges/memory/challenge_c.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/challenges/memory/challenge_b.md b/docs/challenges/memory/challenge_b.md index b8ad436e..49c7c40f 100644 --- a/docs/challenges/memory/challenge_b.md +++ b/docs/challenges/memory/challenge_b.md @@ -4,7 +4,7 @@ **Command to try**: ``` -pytest test/test_memory/test_memory_challenge_b.py::test_memory_challenge_b --level=3 +pytest -s tests/integration/challenges/memory/test_memory_challenge_b.py --level=3 `` ## Description diff --git a/docs/challenges/memory/challenge_c.md b/docs/challenges/memory/challenge_c.md index c6e42f30..fd02a4a5 100644 --- a/docs/challenges/memory/challenge_c.md +++ b/docs/challenges/memory/challenge_c.md @@ -4,7 +4,7 @@ **Command to try**: ``` -pytest test/test_memory/test_memory_challenge_c.py::test_memory_challenge_c --level=2 +pytest -s tests/integration/challenges/memory/test_memory_challenge_c.py --level=2 `` ## Description From 33a3e6f9982496f80d0221817890bef4824fb9e6 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sun, 7 May 2023 23:24:53 -0700 Subject: [PATCH 30/56] Feature/centralize prompt (#3990) Co-authored-by: xiao.hu <454292663@qq.com> --- autogpt/prompts/default_prompts.py | 29 +++++ autogpt/setup.py | 35 +++--- ...t_generate_aiconfig_automatic_default.yaml | 99 ++++++++++++++++- ..._generate_aiconfig_automatic_fallback.yaml | 93 +++++++++++++++- ...t_generate_aiconfig_automatic_typical.yaml | 102 +++++++++++++++++- 5 files changed, 332 insertions(+), 26 deletions(-) create mode 100644 autogpt/prompts/default_prompts.py diff --git a/autogpt/prompts/default_prompts.py b/autogpt/prompts/default_prompts.py new file mode 100644 index 00000000..ebbfa781 --- /dev/null +++ b/autogpt/prompts/default_prompts.py @@ -0,0 +1,29 @@ +#########################Setup.py################################# + 
+DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """ +Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task. + +The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation. + +Example input: +Help me with marketing my business + +Example output: +Name: CMOGPT +Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more. +Goals: +- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer. + +- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations. + +- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment. + +- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track. 
+""" + +DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = ( + "Task: '{{user_prompt}}'\n" + "Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n" +) + +DEFAULT_USER_DESIRE_PROMPT = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt diff --git a/autogpt/setup.py b/autogpt/setup.py index c71fb221..967645c1 100644 --- a/autogpt/setup.py +++ b/autogpt/setup.py @@ -2,12 +2,18 @@ import re from colorama import Fore, Style +from jinja2 import Template from autogpt import utils from autogpt.config import Config from autogpt.config.ai_config import AIConfig from autogpt.llm import create_chat_completion from autogpt.logs import logger +from autogpt.prompts.default_prompts import ( + DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC, + DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC, + DEFAULT_USER_DESIRE_PROMPT, +) CFG = Config() @@ -42,7 +48,7 @@ def prompt_user() -> AIConfig: ) if user_desire == "": - user_desire = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt + user_desire = DEFAULT_USER_DESIRE_PROMPT # Default prompt # If user desire contains "--manual" if "--manual" in user_desire: @@ -164,27 +170,10 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig: AIConfig: The AIConfig object tailored to the user's input """ - system_prompt = """ -Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task. - -The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation. 
- -Example input: -Help me with marketing my business - -Example output: -Name: CMOGPT -Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more. -Goals: -- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer. - -- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations. - -- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment. - -- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track. -""" - + system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC + prompt_ai_config_automatic = Template( + DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC + ).render(user_prompt=user_prompt) # Call LLM with the string as user input messages = [ { @@ -193,7 +182,7 @@ Goals: }, { "role": "user", - "content": f"Task: '{user_prompt}'\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n", + "content": prompt_ai_config_automatic, }, ] output = create_chat_completion(messages, CFG.fast_llm_model) diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml index e8cbefa1..f336cd00 100644 --- a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_default.yaml @@ -20,7 +20,7 @@ interactions: on track.\n"}, {"role": "user", "content": "Task: ''Write a wikipedia style 
article about the project: https://github.com/significant-gravitas/Auto-GPT''\nRespond only with the output in the exact format specified in the system prompt, with - no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}' + no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -90,4 +90,101 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''Write a wikipedia style + article about the project: https://github.com/significant-gravitas/Auto-GPT''\nRespond + only with 
the output in the exact format specified in the system prompt, with + no explanation or conversation."}], "temperature": 0.0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1669' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA1SSy24bMQxF9/0KQptuZow4TtJktgn62BQBWqAtmqKQJXqGjYaciFRcI8i/FyM7 + jy4lkPceXvLBUXSdC4O3ME6pfXd16y9/XHxa3tF3n65/0DmefYwnJ18urlFc42T9B4MdOhZBximh + kbBrXMjoDaPrlmfnq9Pl8uz8pHGjREyuc/1k7Wpx2lrJa2mPVkdL17iivkfXPbgpyzjZb5NbZHXd + 8cWqcS/az//L1XHjTMyn56+T49PHxoVBKKC67ueDG1GfZLMkdJ3zqqTm2WZIYUOeB/jsR+zgG93S + h+uvN3yFGjJNs18HnsEXE5ZRioLvkQ0iKvWMEUxgrwjEYANCHZyEQTZVb8JIvlXbJQSfjUJChfUO + piz3FIl72EpOsQ3JqwL+nTAbKc5yGRV9DkMD20xG3DfgOcJG8uhtfi9u+IP4pN0Nt3ApHEswsEGy + lH54bgfZk/V0jwwmE4UZG1lLRvAhlOzDrkofYkZG1XkA4r0ZCS9mj2+ZDGGgfmjvik9kOzhkCDZ4 + A1JA7n1fWZ+b73EPvsWUWrVcgpWMsYGNpCTbOYPKVyhiIkat1fOSos9R/0uyYryvurXpEOk8huQa + p8lL9VuFQ/IcX+nPbCGVWj5lmTBDIKtj7r0zbjAjB9TFPtqU/FqyN4Qt2VCdi2J+lWOd/zXRiGg6 + /1Cuaw3/G9wVyjgi25MFG3GRomkHCX3mWkfjfCcIZPp0BIf+w271llLSmWN/UFgZ1qgGk6jSOiEo + 5nsKOBc9gS/cY+M2xKTD74xehV3n1GRyjSOO+Nd1R4+/Ht/8AwAA//8DAP+8R3oSBAAA + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3e271a39dbcef9-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 08 May 2023 02:08:13 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '8895' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + 
x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89605' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 262ms + x-request-id: + - 008e44284ea375a3f964c394b25fb76e + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml index 6ddfe32a..55e7c832 100644 --- a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_fallback.yaml @@ -19,7 +19,7 @@ interactions: with unclear information or uncertainty to ensure your marketing strategy remains on track.\n"}, {"role": "user", "content": "Task: ''T&GF\u00a3OIBECC()!*''\nRespond only with the output in the exact format specified in the system prompt, with - no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": null}' + no explanation or conversation.\n"}], "temperature": 0.0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -84,4 +84,95 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective 
problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''T&GF\u00a3OIBECC()!*''\nRespond + only with the output in the exact format specified in the system prompt, with + no explanation or conversation."}], "temperature": 0.0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1590' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA0zOS0tDMRCG4b2/YpiNm7ScWnohW0UoIgoKiiJlmoxtbE4mJHNaS+l/l3rffsM8 + vHsMHi26Falrc+xNLtb0dHN+xU19GE+3eXSXr+/j4+WjjOkJDcrijZ1+f/SdtDmyBklo0BUmZY92 + MJ4OR4PBpJkYbMVzRIvLrL1hf9TTriyk1wybARrsKi0Z7R5zkTbrXGXNqaI9mzYG/+x/u0EVpfi7 + DJvpwaBbSXBc0T7vseX6oxaJjBap1lCVkh4bJSmnY//stIUqpewMzMBRSqKQi2yCZ6AE0mnuFLZB + V9IpELjIVICShy55LkfQ0yIyKNV1H24jU+U/ATYUg/88wqsUaBlU4CsFdtJ9yn08GHwNKdTVvDBV + SWixqmQ0GJLnd7TN4eVw8gEAAP//AwDo3pkcpQEAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3e27aaaa53965d-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 08 May 2023 02:08:29 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: 
+ - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '2445' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89626' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 248ms + x-request-id: + - 8868ee7b699bc67e6988580bb70aa31f + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml index 980a0a10..2182ec3f 100644 --- a/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml +++ b/tests/integration/cassettes/test_setup/test_generate_aiconfig_automatic_typical.yaml @@ -20,7 +20,7 @@ interactions: on track.\n"}, {"role": "user", "content": "Task: ''Help me create a rock opera about cybernetic giraffes''\nRespond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"}], "temperature": - 0.0, "max_tokens": null}' + 0.0, "max_tokens": 0}' headers: Accept: - '*/*' @@ -94,4 +94,104 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "\nYour + task is to devise up to 5 highly effective goals and an appropriate role-based + name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned + with the successful completion of its assigned task.\n\nThe user will provide + the task, you will provide only the output in the exact format specified below + with no explanation or conversation.\n\nExample input:\nHelp me with marketing + my business\n\nExample output:\nName: CMOGPT\nDescription: a professional digital + marketer AI that assists Solopreneurs in growing their businesses 
by providing + world-class expertise in solving marketing problems for SaaS, content products, + agencies, and more.\nGoals:\n- Engage in effective problem-solving, prioritization, + planning, and supporting execution to address your marketing needs as your virtual + Chief Marketing Officer.\n\n- Provide specific, actionable, and concise advice + to help you make informed decisions without the use of platitudes or overly + wordy explanations.\n\n- Identify and prioritize quick wins and cost-effective + campaigns that maximize results with minimal time and budget investment.\n\n- + Proactively take the lead in guiding you and offering suggestions when faced + with unclear information or uncertainty to ensure your marketing strategy remains + on track.\n"}, {"role": "user", "content": "Task: ''Help me create a rock opera + about cybernetic giraffes''\nRespond only with the output in the exact format + specified in the system prompt, with no explanation or conversation."}], "temperature": + 0.0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '1623' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA3SUQW/cRgyF7/0VxJy1C28dx45ubhIkBYrESAPkUBcBd0RJ7I6G0yFH8cbwfw9G + Wsc+tNcRh/zeexzdO+5c6/yI5qcUNpdvDvjHzQd+e0jH97J7Pb+9+m3+9OX67tXF7F3jZP8PeTvd + 2HqZUiBjia5xPhMada7dvbw6v9jtXr46b9wkHQXXuiHZ5nx7sbGS97I5Oz/bucYVxYFce+9SlinZ + V5MDRXXtr1cvGvfU++f57uqycSaG4efRi8vdQ+P8KOxJXfvXvZtIH9tmCeRah6qshtEqpESjWAV8 + wIla+CT+8DG9u/l8G9+Q+sypTmwBI1z/Doskngm8RC2h9gAb0UATecbA30mBI4wUEscBMBurKWDs + YCrKnjEq7HP9ZiNxhhL530Iws7JEBRMI3NP2iQNY1+49ewzhCB0pD5G6WrsqqRNtpBOdRJAesvgD + SKKM2kDKMnNXh9JdomwwFO4welrAtKQkuQrJUoZRii3dKBpnepKcsnhS3d7Gd4JB29u4gdcSAu4l + oxF8YxvhKKVydTRTkAQINTUKoc5Wk3wMHGm1zGOykknXYapUeaQHf9xTjmTsYeCMfU+rf5YxakCr + 
DlfJVf4zmdBLntC2Fev6f22ZaJKM+0DgR8zojbI24NH8eASVOGizDKM44FChO8YgQzkxf+MQ4ECU + AEvHlVgXHTzXTYc+ywRqWM0U6DmyjgvQzRIAQU/U7dEfTr4PA6ktucuKKjNlDAHUcvHVnaUwoa8o + 0i81zySbAEVdysDLSFpzqjd4SuitL2ENfCGtBi0NHtEXsvcU0pJaxJmHmmMtMfJjrPsGWHfPtA5P + WbqykDz3vQGOPpRuTRgHOi1oA4GH0TgOq6Mqpfra97VdU9HX/wMgWC7hCDxNlBcFT8wL4p+n/ayU + HCvGJLZg1FeF+UB2ek//7c2EdzyVqbYVffTU2B/IQDGQbt1D49a0vmZClehapybJNY5jR3euPXv4 + ++GXHwAAAP//AwC/sejJHAUAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c3e2752aadc2524-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 08 May 2023 02:08:26 GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '13467' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89617' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 254ms + x-request-id: + - 76a759df6bbe7d9504acd9e00bdb0f24 + status: + code: 200 + message: OK version: 1 From 0166eacb2b643634ddca8239185a4c220fb58b1d Mon Sep 17 00:00:00 2001 From: Tomasz Kasperczyk Date: Mon, 8 May 2023 14:59:12 +0200 Subject: [PATCH 31/56] Use correct reference to prompt_generator in autogpt/llm/chat.py (#4011) --- autogpt/llm/chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index 8f7f7d50..a59fe73e 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -200,7 +200,7 @@ def chat_with_ai( if not plugin.can_handle_on_planning(): continue plugin_response = plugin.on_planning( - agent.prompt_generator, 
current_context + agent.config.prompt_generator, current_context ) if not plugin_response or plugin_response == "": continue From 23e1e1ed53d05f4cea4a66aa9eac6ee3b6501225 Mon Sep 17 00:00:00 2001 From: minfeng-ai <42948406+minfenglu@users.noreply.github.com> Date: Mon, 8 May 2023 06:10:58 -0700 Subject: [PATCH 32/56] fix typos (#3998) Co-authored-by: Minfeng Lu Co-authored-by: Richard Beales --- autogpt/processing/text.py | 4 ++-- docs/challenges/introduction.md | 2 +- tests/integration/goal_oriented/goal_oriented_tasks.md | 2 +- tests/vcr/vcr_filter.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index 9391e0a0..4687fb71 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -30,10 +30,10 @@ def split_text( Raises: ValueError: If the text is longer than the maximum length """ - flatened_paragraphs = " ".join(text.split("\n")) + flattened_paragraphs = " ".join(text.split("\n")) nlp = spacy.load(CFG.browse_spacy_language_model) nlp.add_pipe("sentencizer") - doc = nlp(flatened_paragraphs) + doc = nlp(flattened_paragraphs) sentences = [sent.text.strip() for sent in doc.sents] current_chunk = [] diff --git a/docs/challenges/introduction.md b/docs/challenges/introduction.md index 1d404100..63f570f2 100644 --- a/docs/challenges/introduction.md +++ b/docs/challenges/introduction.md @@ -1,4 +1,4 @@ -indroduction.md +introduction.md # Introduction to Challenges Welcome to the Auto-GPT Challenges page! This is a space where we encourage community members to collaborate and contribute towards improving Auto-GPT by identifying and solving challenges that Auto-GPT is not yet able to achieve. 
diff --git a/tests/integration/goal_oriented/goal_oriented_tasks.md b/tests/integration/goal_oriented/goal_oriented_tasks.md index 02e3c343..b06b371b 100644 --- a/tests/integration/goal_oriented/goal_oriented_tasks.md +++ b/tests/integration/goal_oriented/goal_oriented_tasks.md @@ -6,5 +6,5 @@ To know which one, you can run the following command: ```bash pytest -s -k tests/integration/goal_oriented -If the test is successul, it will record new cassettes in VCR. Then you can just push these to your branch and the pipeline +If the test is successful, it will record new cassettes in VCR. Then you can just push these to your branch and the pipeline will pass diff --git a/tests/vcr/vcr_filter.py b/tests/vcr/vcr_filter.py index 38e4cea6..892b8021 100644 --- a/tests/vcr/vcr_filter.py +++ b/tests/vcr/vcr_filter.py @@ -34,7 +34,7 @@ def replace_timestamp_in_request(request: Any) -> Any: return request body[ "max_tokens" - ] = 0 # this field is incosistent between requests and not used at the moment. + ] = 0 # this field is inconsistent between requests and not used at the moment. for message in body["messages"]: if "content" in message and "role" in message: if message["role"] == "system": From 980bbe2bc356e837da114f58941a8abc1d11c1c1 Mon Sep 17 00:00:00 2001 From: Shlomi <81581678+jit-shlomi@users.noreply.github.com> Date: Mon, 8 May 2023 16:16:48 +0300 Subject: [PATCH 33/56] fix typo in the getting started docs (#3997) Co-authored-by: Richard Beales --- docs/setup.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/setup.md b/docs/setup.md index c4974914..a7640b64 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -23,7 +23,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt [openai/api limits]: https://platform.openai.com/docs/guides/rate-limits/overview#:~:text=Free%20trial%20users,RPM%0A40%2C000%20TPM !!! 
important - It's highly recommended that you keep keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage). + It's highly recommended that you keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage). You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). ![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png) From 5989c14577c96aaaf84884a59489e6b0dbaf4513 Mon Sep 17 00:00:00 2001 From: Itai Steinherz Date: Wed, 10 May 2023 08:45:09 +0300 Subject: [PATCH 34/56] Fix path to workspace directory in setup guide (#3927) Co-authored-by: Nicholas Tindle --- docs/setup.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/setup.md b/docs/setup.md index a7640b64..782dbabd 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -56,7 +56,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt REDIS_HOST: ${REDIS_HOST:-redis} profiles: ["exclude-from-up"] volumes: - - ./auto_gpt_workspace:/app/auto_gpt_workspace + - ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace - ./data:/app/data ## allow auto-gpt to write logs to disk - ./logs:/app/logs From 8f3119621ce0fc56d5fdb30bd1c83e72216ab2c5 Mon Sep 17 00:00:00 2001 From: Boostrix <119627414+Boostrix@users.noreply.github.com> Date: Wed, 10 May 2023 08:01:45 +0200 Subject: [PATCH 35/56] document that docker-compose 1.29.0 is minimally required (#3963) Co-authored-by: Nicholas Tindle --- docker-compose.yml | 1 + docs/setup.md | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4c6656dd..a23aa431 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,6 @@ # To boot the app run the following: # docker-compose run auto-gpt +# NOTE: Version 3.9 requires at least docker-compose version 1.29.0 ! 
version: "3.9" services: diff --git a/docs/setup.md b/docs/setup.md index 782dbabd..4ef9ec02 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -156,7 +156,18 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt ### Run with Docker -Easiest is to use `docker-compose`. Run the commands below in your Auto-GPT folder. +Easiest is to use `docker-compose`. + +Important: Docker Compose version 1.29.0 or later is required to use version 3.9 of the Compose file format. +You can check the version of Docker Compose installed on your system by running the following command: + + docker-compose version + +This will display the version of Docker Compose that is currently installed on your system. + +If you need to upgrade Docker Compose to a newer version, you can follow the installation instructions in the Docker documentation: https://docs.docker.com/compose/install/ + +Once you have a recent version of docker-compose, run the commands below in your Auto-GPT folder. 1. Build the image. If you have pulled the image from Docker Hub, skip this step. 
From 251317898041904428e55f563f7ce26b0f7c93dd Mon Sep 17 00:00:00 2001 From: Andres Caicedo <73312784+AndresCdo@users.noreply.github.com> Date: Fri, 12 May 2023 02:19:52 +0200 Subject: [PATCH 36/56] Integrate pytest-xdist Plugin for Parallel and Concurrent Testing (#3870) * Adds pytest-parallel dependencies * Implement pytest-parallel for faster tests * Uses pytest-xdist * Auto number of workers processes * Update ci.yml --------- Co-authored-by: Nicholas Tindle --- .github/workflows/ci.yml | 2 +- .github/workflows/docker-ci.yml | 2 +- requirements.txt | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a08ecb02..f21a263e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,7 +71,7 @@ jobs: - name: Run unittest tests with coverage run: | - pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term + pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term env: CI: true diff --git a/.github/workflows/docker-ci.yml b/.github/workflows/docker-ci.yml index 28576d02..315c42e5 100644 --- a/.github/workflows/docker-ci.yml +++ b/.github/workflows/docker-ci.yml @@ -101,7 +101,7 @@ jobs: set +e test_output=$( docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \ - pytest --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1 + pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1 ) test_failure=$? 
diff --git a/requirements.txt b/requirements.txt index e1900ef5..be015f2b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -53,3 +53,4 @@ pytest-integration pytest-mock vcrpy pytest-recording +pytest-xdist \ No newline at end of file From b06ea616d9212ae2a9d6266bc680377b31261dea Mon Sep 17 00:00:00 2001 From: Boostrix <119627414+Boostrix@users.noreply.github.com> Date: Fri, 12 May 2023 20:22:26 +0200 Subject: [PATCH 37/56] explain temperature setting in env file (#4140) Co-authored-by: Richard Beales --- .env.template | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.env.template b/.env.template index 33cabc96..a32bf936 100644 --- a/.env.template +++ b/.env.template @@ -41,7 +41,17 @@ ### OPENAI ## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key) + + +## NOTE: https://platform.openai.com/docs/api-reference/completions +# The temperature setting in language models like GPT controls the balance between predictable and random responses. +# Lower temperature makes the responses more focused and deterministic, while higher temperature makes them more +# creative and varied. The temperature range typically goes from 0 to 2 in OpenAI's implementation. 
+## ## TEMPERATURE - Sets temperature in OpenAI (Default: 0) +## +### + ## USE_AZURE - Use Azure OpenAI or not (Default: False) OPENAI_API_KEY=your-openai-api-key # TEMPERATURE=0 From a60512cdae97f21eb25603cd6b04fac699ad2b52 Mon Sep 17 00:00:00 2001 From: Kristian Jackson Date: Fri, 12 May 2023 15:18:15 -0400 Subject: [PATCH 38/56] Catch JSON error in summary_memory.py (#3996) Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- autogpt/memory_management/summary_memory.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/autogpt/memory_management/summary_memory.py b/autogpt/memory_management/summary_memory.py index 4e818acf..a13b63f3 100644 --- a/autogpt/memory_management/summary_memory.py +++ b/autogpt/memory_management/summary_memory.py @@ -6,6 +6,7 @@ from autogpt.agent import Agent from autogpt.config import Config from autogpt.llm.llm_utils import create_chat_completion from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME +from autogpt.logs import logger cfg = Config() @@ -75,10 +76,14 @@ def update_running_summary( event["role"] = "you" # Remove "thoughts" dictionary from "content" - content_dict = json.loads(event["content"]) - if "thoughts" in content_dict: - del content_dict["thoughts"] - event["content"] = json.dumps(content_dict) + try: + content_dict = json.loads(event["content"]) + if "thoughts" in content_dict: + del content_dict["thoughts"] + event["content"] = json.dumps(content_dict) + except json.decoder.JSONDecodeError: + if cfg.debug_mode: + logger.error(f"Error: Invalid JSON: {event['content']}\n") elif event["role"].lower() == "system": event["role"] = "your computer" From c771e1fd50dd33cee1aae37e23fc7dde97466c57 Mon Sep 17 00:00:00 2001 From: Eduardo Salinas Date: Fri, 12 May 2023 17:57:30 -0400 Subject: [PATCH 39/56] Update duckduckgo dependency - min should be 2.9.5 (#4142) Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- 
requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index be015f2b..c59b8bb6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ requests tiktoken==0.3.3 gTTS==2.3.1 docker -duckduckgo-search +duckduckgo-search>=2.9.5 google-api-python-client #(https://developers.google.com/custom-search/v1/overview) pinecone-client==2.2.1 redis @@ -53,4 +53,4 @@ pytest-integration pytest-mock vcrpy pytest-recording -pytest-xdist \ No newline at end of file +pytest-xdist From 79fba4ab7b10cfb45e21816c58b7a6cac3eec5c6 Mon Sep 17 00:00:00 2001 From: prom3theu5 Date: Fri, 12 May 2023 23:47:02 +0100 Subject: [PATCH 40/56] Update Dockerfile - add missing scripts and plugins directories. (#3706) Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d32d4a66..02f68752 100644 --- a/Dockerfile +++ b/Dockerfile @@ -37,6 +37,6 @@ RUN sed -i '/Items below this point will not be included in the Docker Image/,$d WORKDIR /app ONBUILD COPY autogpt/ ./autogpt ONBUILD COPY scripts/ ./scripts - +ONBUILD COPY plugins/ ./plugins FROM autogpt-${BUILD_TYPE} AS auto-gpt From 21a202b655779a957215002b320d701db592a818 Mon Sep 17 00:00:00 2001 From: dominic-ks Date: Sat, 13 May 2023 00:01:52 +0100 Subject: [PATCH 41/56] Updated memory setup links (#3829) Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- autogpt/memory/pinecone.py | 2 +- autogpt/memory/redismem.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py index 7c027956..1c73ebd7 100644 --- a/autogpt/memory/pinecone.py +++ b/autogpt/memory/pinecone.py @@ -32,7 +32,7 @@ class PineconeMemory(MemoryProviderSingleton): logger.double_check( "Please ensure you have setup and configured Pinecone properly for use." 
+ f"You can check out {Fore.CYAN + Style.BRIGHT}" - "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup" + "https://docs.agpt.co/configuration/memory/#pinecone-api-key-setup" f"{Style.RESET_ALL} to ensure you've set up everything correctly." ) exit(1) diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py index 2a072a55..9b126f01 100644 --- a/autogpt/memory/redismem.py +++ b/autogpt/memory/redismem.py @@ -58,7 +58,7 @@ class RedisMemory(MemoryProviderSingleton): logger.double_check( "Please ensure you have setup and configured Redis properly for use. " + f"You can check out {Fore.CYAN + Style.BRIGHT}" - f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}" + f"https://docs.agpt.co/configuration/memory/#redis-setup{Style.RESET_ALL}" " to ensure you've set up everything correctly." ) exit(1) From 12e806d75469dc22c5722492b29e3e940d2192d1 Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Sat, 13 May 2023 02:23:54 +0300 Subject: [PATCH 42/56] Parse package versions so upgrades can be forced (#4149) * parse package versions so upgrades can be forced * better version from @collijk --- scripts/check_requirements.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py index 3323697b..ceccf930 100644 --- a/scripts/check_requirements.py +++ b/scripts/check_requirements.py @@ -11,15 +11,19 @@ def main(): line.strip().split("#")[0].strip() for line in f.readlines() ] - installed_packages = [package.key for package in pkg_resources.working_set] + installed_packages = {pkg.key: pkg.version for pkg in pkg_resources.working_set} missing_packages = [] - for package in required_packages: - if not package: # Skip empty lines + for required_package in required_packages: + if not required_package: # Skip empty lines continue - package_name = re.split("[<>=@ ]+", package.strip())[0] - if package_name.lower() not 
in installed_packages: - missing_packages.append(package_name) + pkg = pkg_resources.Requirement.parse(required_package) + if ( + pkg.key not in installed_packages + or pkg_resources.parse_version(installed_packages[pkg.key]) + not in pkg.specifier + ): + missing_packages.append(str(pkg)) if missing_packages: print("Missing packages:") From 6c78d80d37194ddfa80ea28af7f8f5a589a207e2 Mon Sep 17 00:00:00 2001 From: andrey13771 <51243350+andrey13771@users.noreply.github.com> Date: Sat, 13 May 2023 04:00:08 +0300 Subject: [PATCH 43/56] fix typo in autopgt/agent/agent.py (#3747) Co-authored-by: merwanehamadi Co-authored-by: Richard Beales Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- autogpt/agent/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index e2c44792..75f51dc1 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -162,7 +162,7 @@ class Agent: ) logger.info( - "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands or " + "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, " "'n' to exit program, or enter feedback for " f"{self.ai_name}..." 
) From 7a34d49264abaa885069900dedc77761997a3aef Mon Sep 17 00:00:00 2001 From: Robin Richtsfeld Date: Sat, 13 May 2023 16:19:18 +0200 Subject: [PATCH 44/56] Fix `milvus_memory_test.py` mock `Config` (#3424) Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- tests/milvus_memory_test.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/tests/milvus_memory_test.py b/tests/milvus_memory_test.py index 46010f7e..9672326b 100644 --- a/tests/milvus_memory_test.py +++ b/tests/milvus_memory_test.py @@ -5,22 +5,21 @@ import sys import unittest try: + from autogpt.config import Config from autogpt.memory.milvus import MilvusMemory - def mock_config() -> dict: + def mock_config() -> Config: """Mock the config object for testing purposes.""" + # Return a mock config object with the required attributes - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "milvus_collection": "autogpt", - "milvus_addr": "localhost:19530", - }, - ) + class MockConfig(Config): + debug_mode = False + continuous_mode = False + speak_mode = False + milvus_collection = "autogpt" + milvus_addr = "localhost:19530" + + return MockConfig() class TestMilvusMemory(unittest.TestCase): """Tests for the MilvusMemory class.""" From 233f900fa6dff98ef935314d7a062ecc5a4dc0ac Mon Sep 17 00:00:00 2001 From: Marwand Ayubi <98717667+xhypeDE@users.noreply.github.com> Date: Sat, 13 May 2023 17:21:16 +0200 Subject: [PATCH 45/56] Implemented showing the number of preauthorised commands left. 
#1035 (#3322) Co-authored-by: mayubi Co-authored-by: Nicholas Tindle Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- autogpt/agent/agent.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 75f51dc1..4db9e2ee 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -149,18 +149,18 @@ class Agent: NEXT_ACTION_FILE_NAME, ) + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} " + f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", + ) + if not cfg.continuous_mode and self.next_action_count == 0: # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### # Get key press: Prompt the user to press enter to continue or escape # to exit self.user_input = "" - logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} " - f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", - ) - logger.info( "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands, " "'n' to exit program, or enter feedback for " @@ -235,12 +235,9 @@ class Agent: logger.info("Exiting...") break else: - # Print command + # Print authorized commands left value logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}" - f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}", + f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}" ) # Execute command From 900de5fe63a26cefc19d95728a881f67ff137fdf Mon Sep 17 00:00:00 2001 From: Media <12145726+rihp@users.noreply.github.com> Date: Sat, 13 May 2023 21:21:21 +0200 Subject: [PATCH 46/56] Challenge: Kubernetes and documentation (#4121) * challenge_kubes_and_readme * docs * testing * black and isort * revision * lint * comments * blackisort * docs * docs * deleting_cassette * suggestions * 
misspelling_errors --------- Co-authored-by: merwanehamadi --- docs/challenges/building_challenges.md | 135 ++++++++++++++++++ tests/integration/agent_factory.py | 33 +++++ .../test_kubernetes_template_challenge_a.py | 50 +++++++ 3 files changed, 218 insertions(+) create mode 100644 docs/challenges/building_challenges.md create mode 100644 tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py diff --git a/docs/challenges/building_challenges.md b/docs/challenges/building_challenges.md new file mode 100644 index 00000000..c62c32d0 --- /dev/null +++ b/docs/challenges/building_challenges.md @@ -0,0 +1,135 @@ +# Creating Challenges for AutoGPT + +🏹 We're on the hunt for talented Challenge Creators! 🎯 + +Join us in shaping the future of Auto-GPT by designing challenges that test its limits. Your input will be invaluable in guiding our progress and ensuring that we're on the right track. We're seeking individuals with a diverse skill set, including: + +🎨 UX Design: Your expertise will enhance the user experience for those attempting to conquer our challenges. With your help, we'll develop a dedicated section in our wiki, and potentially even launch a standalone website. + +💻 Coding Skills: Proficiency in Python, pytest, and VCR (a library that records OpenAI calls and stores them) will be essential for creating engaging and robust challenges. + +⚙️ DevOps Skills: Experience with CI pipelines in GitHub and possibly Google Cloud Platform will be instrumental in streamlining our operations. + +Are you ready to play a pivotal role in Auto-GPT's journey? Apply now to become a Challenge Creator by opening a PR! 🚀 + + +# Getting Started +Clone the original AutoGPT repo and checkout to master branch + + +The challenges are not written using a specific framework. 
They try to be very agnostic +The challenges are acting like a user that wants something done: +INPUT: +- User desire +- Files, other inputs + +Output => Artifact (files, image, code, etc, etc...) + +## Defining your Agent + +Go to https://github.com/Significant-Gravitas/Auto-GPT/blob/master/tests/integration/agent_factory.py + +Create your agent fixture. + +```python +def kubernetes_agent( + agent_test_config, memory_local_cache, workspace: Workspace +): + # Please choose the commands your agent will need to beat the challenges, the full list is available in the main.py + # (we 're working on a better way to design this, for now you have to look at main.py) + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + # Define all the settings of our challenged agent + ai_config = AIConfig( + ai_name="Kubernetes", + ai_role="an autonomous agent that specializes in creating Kubernetes deployment templates.", + ai_goals=[ + "Write a simple kubernetes deployment file and save it as a kube.yaml.", + ], + ) + ai_config.command_registry = command_registry + + system_prompt = ai_config.construct_full_prompt() + Config().set_continuous_mode(False) + agent = Agent( + # We also give the AI a name + ai_name="Kubernetes-Demo", + memory=memory_local_cache, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent +``` + +## Creating your challenge +Go to `tests/integration/challenges`and create a file that is called `test_your_test_description.py` and add it to the appropriate folder. If no category exists you can create a new one. 
+ +Your test could look something like this + +```python +import contextlib +from functools import wraps +from typing import Generator + +import pytest +import yaml + +from autogpt.commands.file_operations import read_file, write_to_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import run_multiple_times +from tests.utils import requires_api_key + + +def input_generator(input_sequence: list) -> Generator[str, None, None]: + """ + Creates a generator that yields input strings from the given sequence. + + :param input_sequence: A list of input strings. + :return: A generator that yields input strings. + """ + yield from input_sequence + + +@pytest.mark.skip("This challenge hasn't been beaten yet.") +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +@run_multiple_times(3) +def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> None: + """ + Test the challenge_a function in a given agent by mocking user inputs + and checking the output file content. + + :param get_company_revenue_agent: The agent to test. + :param monkeypatch: pytest's monkeypatch utility for modifying builtins. 
+ """ + input_sequence = ["s", "s", "s", "s", "s", "EXIT"] + gen = input_generator(input_sequence) + monkeypatch.setattr("builtins.input", lambda _: next(gen)) + + with contextlib.suppress(SystemExit): + run_interaction_loop(kubernetes_agent, None) + + # here we load the output file + file_path = str(kubernetes_agent.workspace.get_path("kube.yaml")) + content = read_file(file_path) + + # then we check if it's including keywords from the kubernetes deployment config + for word in ["apiVersion", "kind", "metadata", "spec"]: + assert word in content, f"Expected the file to contain {word}" + + content = yaml.safe_load(content) + for word in ["Service", "Deployment", "Pod"]: + assert word in content["kind"], f"Expected the file to contain {word}" + + +``` + + diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py index b6168034..12e06721 100644 --- a/tests/integration/agent_factory.py +++ b/tests/integration/agent_factory.py @@ -183,3 +183,36 @@ def get_company_revenue_agent( ) return agent + + +@pytest.fixture +def kubernetes_agent(memory_local_cache, workspace: Workspace): + command_registry = CommandRegistry() + command_registry.import_commands("autogpt.commands.file_operations") + command_registry.import_commands("autogpt.app") + + ai_config = AIConfig( + ai_name="Kubernetes", + ai_role="an autonomous agent that specializes in creating Kubernetes deployment templates.", + ai_goals=[ + "Write a simple kubernetes deployment file and save it as a kube.yaml.", + # You should make a simple nginx web server that uses docker and exposes the port 80. 
+ ], + ) + ai_config.command_registry = command_registry + + system_prompt = ai_config.construct_full_prompt() + Config().set_continuous_mode(False) + agent = Agent( + ai_name="Kubernetes-Demo", + memory=memory_local_cache, + full_message_history=[], + command_registry=command_registry, + config=ai_config, + next_action_count=0, + system_prompt=system_prompt, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=workspace.root, + ) + + return agent diff --git a/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py b/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py new file mode 100644 index 00000000..79228203 --- /dev/null +++ b/tests/integration/challenges/kubernetes/test_kubernetes_template_challenge_a.py @@ -0,0 +1,50 @@ +import contextlib +from typing import Generator + +import pytest +import yaml + +from autogpt.commands.file_operations import read_file +from tests.integration.agent_utils import run_interaction_loop +from tests.integration.challenges.utils import run_multiple_times +from tests.utils import requires_api_key + + +def input_generator(input_sequence: list) -> Generator[str, None, None]: + """ + Creates a generator that yields input strings from the given sequence. + + :param input_sequence: A list of input strings. + :return: A generator that yields input strings. + """ + yield from input_sequence + + +@pytest.mark.skip("This challenge hasn't been beaten yet.") +@pytest.mark.vcr +@requires_api_key("OPENAI_API_KEY") +@run_multiple_times(3) +def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> None: + """ + Test the challenge_a function in a given agent by mocking user inputs + and checking the output file content. + + :param get_company_revenue_agent: The agent to test. + :param monkeypatch: pytest's monkeypatch utility for modifying builtins. 
+ """ + input_sequence = ["s", "s", "s", "s", "s", "EXIT"] + gen = input_generator(input_sequence) + monkeypatch.setattr("builtins.input", lambda _: next(gen)) + + with contextlib.suppress(SystemExit): + run_interaction_loop(kubernetes_agent, None) + + file_path = str(kubernetes_agent.workspace.get_path("kube.yaml")) + content = read_file(file_path) + + for word in ["apiVersion", "kind", "metadata", "spec"]: + assert word in content, f"Expected the file to contain {word}" + + content = yaml.safe_load(content) + for word in ["Service", "Deployment", "Pod"]: + assert word in content["kind"], f"Expected the file to contain {word}" From 2f7beebc61065b2aab46be84f32b3cbd3b8932a0 Mon Sep 17 00:00:00 2001 From: Cenny Date: Sat, 13 May 2023 22:06:50 +0200 Subject: [PATCH 47/56] Make sdwebui tests pass (when SD is running) (#3721) Co-authored-by: Nicholas Tindle --- tests/test_image_gen.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index 546cc758..b4eb99e0 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -48,18 +48,18 @@ def test_huggingface(config, workspace, image_size, image_model): ) -@pytest.mark.xfail(reason="SD WebUI call does not work.") +@pytest.mark.skip(reason="External SD WebUI may not be available.") def test_sd_webui(config, workspace, image_size): """Test SD WebUI image generation.""" generate_and_validate( config, workspace, - image_provider="sd_webui", + image_provider="sdwebui", image_size=image_size, ) -@pytest.mark.xfail(reason="SD WebUI call does not work.") +@pytest.mark.skip(reason="External SD WebUI may not be available.") def test_sd_webui_negative_prompt(config, workspace, image_size): gen_image = functools.partial( generate_image_with_sd_webui, @@ -83,7 +83,7 @@ def test_sd_webui_negative_prompt(config, workspace, image_size): def lst(txt): """Extract the file path from the output of `generate_image()`""" - return Path(txt.split(":")[1].strip()) + 
return Path(txt.split(":", maxsplit=1)[1].strip()) def generate_and_validate( From b958386689e63a48446e574326eb31ad924783a3 Mon Sep 17 00:00:00 2001 From: Abdelkarim Habouch <37211852+karimhabush@users.noreply.github.com> Date: Sat, 13 May 2023 22:28:30 +0100 Subject: [PATCH 48/56] Add Edge browser support using EdgeChromiumDriverManager (#3058) Co-authored-by: Nicholas Tindle Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com> --- .env.template | 2 +- autogpt/commands/web_selenium.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.env.template b/.env.template index a32bf936..aba612fd 100644 --- a/.env.template +++ b/.env.template @@ -192,7 +192,7 @@ OPENAI_API_KEY=your-openai-api-key ### BROWSER ## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True) ## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome). -## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser +## Note: set this to either 'chrome', 'firefox', 'safari' or 'edge' depending on your current browser # HEADLESS_BROWSER=True # USE_WEB_BROWSER=chrome ## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 
75 % of FAST_TOKEN_LIMIT is usually wise ) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 8cec2323..4f5ad30e 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -10,6 +10,7 @@ from selenium import webdriver from selenium.common.exceptions import WebDriverException from selenium.webdriver.chrome.options import Options as ChromeOptions from selenium.webdriver.common.by import By +from selenium.webdriver.edge.options import Options as EdgeOptions from selenium.webdriver.firefox.options import Options as FirefoxOptions from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.safari.options import Options as SafariOptions @@ -17,6 +18,7 @@ from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.wait import WebDriverWait from webdriver_manager.chrome import ChromeDriverManager from webdriver_manager.firefox import GeckoDriverManager +from webdriver_manager.microsoft import EdgeChromiumDriverManager import autogpt.processing.text as summary from autogpt.commands.command import command @@ -78,6 +80,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: "chrome": ChromeOptions, "safari": SafariOptions, "firefox": FirefoxOptions, + "edge": EdgeOptions, } options = options_available[CFG.selenium_web_browser]() @@ -96,6 +99,10 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: # Requires a bit more setup on the users end # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari driver = webdriver.Safari(options=options) + elif CFG.selenium_web_browser == "edge": + driver = webdriver.Edge( + executable_path=EdgeChromiumDriverManager().install(), options=options + ) else: if platform == "linux" or platform == "linux2": options.add_argument("--disable-dev-shm-usage") From e6f8e5150433c43a6b819903d10f60c274613b75 Mon Sep 17 00:00:00 2001 From: k-boikov 
<64261260+k-boikov@users.noreply.github.com> Date: Sun, 14 May 2023 01:59:36 +0300 Subject: [PATCH 49/56] Added --install-plugin-deps to Docker (#4151) Co-authored-by: Nicholas Tindle --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 02f68752..b2e09e8e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ ENV PATH="$PATH:/root/.local/bin" COPY requirements.txt . # Set the entrypoint -ENTRYPOINT ["python", "-m", "autogpt"] +ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"] # dev build -> include everything FROM autogpt-base as autogpt-dev From 4143d212a59ae6f5b434bf5c46eb00d03c7992d8 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 13 May 2023 16:07:37 -0700 Subject: [PATCH 50/56] Feature/basic proxy (#4164) * basic proxy (#54) * basic proxy (#55) * basic proxy * basic proxy * basic proxy * basic proxy * add back double quotes * add more specific files * write file * basic proxy * Put back double quotes --- .github/workflows/add-cassettes.yml | 49 ++++++++ .github/workflows/ci.yml | 24 +++- autogpt/llm/api_manager.py | 9 +- autogpt/llm/llm_utils.py | 2 +- tests/conftest.py | 3 + .../test_information_retrieval_challenge_a.py | 2 +- .../memory/test_memory_challenge_a.py | 4 +- .../memory/test_memory_challenge_b.py | 2 +- .../memory/test_memory_challenge_c.py | 2 +- tests/integration/conftest.py | 36 +++++- .../test_write_file/test_write_file.yaml | 116 ++++++++++++++++++ .../goal_oriented/test_browse_website.py | 2 +- .../goal_oriented/test_write_file.py | 7 +- tests/{unit => integration}/test_commands.py | 3 +- tests/integration/test_llm_utils.py | 5 +- tests/integration/test_local_cache.py | 2 +- tests/integration/test_memory_management.py | 5 +- tests/integration/test_setup.py | 8 +- tests/test_api_manager.py | 3 + tests/test_image_gen.py | 2 +- tests/utils.py | 10 +- tests/vcr/vcr_filter.py | 40 +++++- 22 files changed, 300 insertions(+), 36 deletions(-) create mode 100644 
.github/workflows/add-cassettes.yml rename tests/{unit => integration}/test_commands.py (92%) diff --git a/.github/workflows/add-cassettes.yml b/.github/workflows/add-cassettes.yml new file mode 100644 index 00000000..90e4402d --- /dev/null +++ b/.github/workflows/add-cassettes.yml @@ -0,0 +1,49 @@ +name: Merge and Commit Cassettes + +on: + pull_request_target: + types: + - closed + +jobs: + update-cassettes: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: 0 # This is necessary to fetch all branches and tags + + - name: Fetch all branches + run: git fetch --all + + - name: Reset branch + run: | + git checkout ${{ github.event.pull_request.base.ref }} + git reset --hard origin/cassette-diff-${{ github.event.pull_request.number }} + + - name: Create PR + id: create_pr + uses: peter-evans/create-pull-request@v5 + with: + commit-message: Update cassettes + signoff: false + branch: cassette-diff-${{ github.event.pull_request.number }} + delete-branch: false + title: "Update cassettes" + body: "This PR updates the cassettes." + draft: false + + - name: Check PR + run: | + echo "Pull Request Number - ${{ steps.create_pr.outputs.pull-request-number }}" + echo "Pull Request URL - ${{ steps.create_pr.outputs.pull-request-url }}" + + - name: Comment PR URL in the current PR + uses: thollander/actions-comment-pull-request@v2 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + message: | + New pull request created for cassettes: [HERE](${{ steps.create_pr.outputs.pull-request-url }}). Please merge it asap. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f21a263e..e8830775 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,12 +3,12 @@ name: Python CI on: push: branches: [ master ] - pull_request: + pull_request_target: branches: [ master, stable ] concurrency: group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + cancel-in-progress: ${{ github.event_name == 'pull_request_target' }} jobs: lint: @@ -19,6 +19,9 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.ref }} - name: Set up Python ${{ env.min-python-version }} uses: actions/setup-python@v2 @@ -58,6 +61,9 @@ jobs: steps: - name: Check out repository uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.ref }} - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 @@ -74,6 +80,20 @@ jobs: pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term env: CI: true + PROXY: ${{ vars.PROXY }} + AGENT_MODE: ${{ vars.AGENT_MODE }} + AGENT_TYPE: ${{ vars.AGENT_TYPE }} - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 + + - name: Stage new files and commit + run: | + git add tests + git diff --cached --quiet && echo "No changes to commit" && exit 0 + git config user.email "github-actions@github.com" + git config user.name "GitHub Actions" + git commit -m "Add new cassettes" + git checkout -b cassette-diff-${{ github.event.pull_request.number }} + git push -f origin cassette-diff-${{ github.event.pull_request.number }} + echo "COMMIT_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV diff --git a/autogpt/llm/api_manager.py b/autogpt/llm/api_manager.py index 9143389e..a7777a2b 100644 --- a/autogpt/llm/api_manager.py +++ 
b/autogpt/llm/api_manager.py @@ -59,10 +59,11 @@ class ApiManager(metaclass=Singleton): max_tokens=max_tokens, api_key=cfg.openai_api_key, ) - logger.debug(f"Response: {response}") - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - self.update_cost(prompt_tokens, completion_tokens, model) + if not hasattr(response, "error"): + logger.debug(f"Response: {response}") + prompt_tokens = response.usage.prompt_tokens + completion_tokens = response.usage.completion_tokens + self.update_cost(prompt_tokens, completion_tokens, model) return response def update_cost(self, prompt_tokens, completion_tokens, model): diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py index a77bccbc..58b19735 100644 --- a/autogpt/llm/llm_utils.py +++ b/autogpt/llm/llm_utils.py @@ -181,7 +181,7 @@ def create_chat_completion( ) warned_user = True except (APIError, Timeout) as e: - if e.http_status != 502: + if e.http_status != 502 : raise if attempt == num_retries - 1: raise diff --git a/tests/conftest.py b/tests/conftest.py index da00058b..6e6f0ad3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ +import os from pathlib import Path import pytest @@ -9,6 +10,8 @@ from autogpt.workspace import Workspace pytest_plugins = ["tests.integration.agent_factory"] +PROXY = os.environ.get("PROXY") + @pytest.fixture() def workspace_root(tmp_path: Path) -> Path: diff --git a/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py b/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py index a5f8fb4c..b96e811a 100644 --- a/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py +++ b/tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py @@ -26,7 +26,7 @@ def input_generator(input_sequence: list) -> Generator[str, None, None]: @requires_api_key("OPENAI_API_KEY") 
@run_multiple_times(3) def test_information_retrieval_challenge_a( - get_company_revenue_agent, monkeypatch + get_company_revenue_agent, monkeypatch, patched_api_requestor ) -> None: """ Test the challenge_a function in a given agent by mocking user inputs and checking the output file content. diff --git a/tests/integration/challenges/memory/test_memory_challenge_a.py b/tests/integration/challenges/memory/test_memory_challenge_a.py index 895fc8fe..fb5876cd 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_a.py +++ b/tests/integration/challenges/memory/test_memory_challenge_a.py @@ -13,7 +13,7 @@ MAX_LEVEL = 3 @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") def test_memory_challenge_a( - memory_management_agent: Agent, user_selected_level: int + memory_management_agent: Agent, user_selected_level: int, patched_api_requestor ) -> None: """ The agent reads a file containing a task_id. Then, it reads a series of other files. @@ -30,7 +30,7 @@ def test_memory_challenge_a( create_instructions_files(memory_management_agent, num_files, task_id) try: - run_interaction_loop(memory_management_agent, 180) + run_interaction_loop(memory_management_agent, 400) # catch system exit exceptions except SystemExit: file_path = str(memory_management_agent.workspace.get_path("output.txt")) diff --git a/tests/integration/challenges/memory/test_memory_challenge_b.py b/tests/integration/challenges/memory/test_memory_challenge_b.py index 628b4989..21d46b38 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_b.py +++ b/tests/integration/challenges/memory/test_memory_challenge_b.py @@ -14,7 +14,7 @@ NOISE = 1000 @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") def test_memory_challenge_b( - memory_management_agent: Agent, user_selected_level: int + memory_management_agent: Agent, user_selected_level: int, patched_api_requestor ) -> None: """ The agent reads a series of files, each containing a task_id and noise. 
After reading 'n' files, diff --git a/tests/integration/challenges/memory/test_memory_challenge_c.py b/tests/integration/challenges/memory/test_memory_challenge_c.py index edd3efe0..634a24a3 100644 --- a/tests/integration/challenges/memory/test_memory_challenge_c.py +++ b/tests/integration/challenges/memory/test_memory_challenge_c.py @@ -14,7 +14,7 @@ NOISE = 1000 @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") def test_memory_challenge_c( - memory_management_agent: Agent, user_selected_level: int + memory_management_agent: Agent, user_selected_level: int, patched_api_requestor ) -> None: """ Instead of reading task Ids from files as with the previous challenges, the agent now must remember diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 00928702..dfb94d0e 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,7 +1,9 @@ import os +import openai import pytest +from tests.conftest import PROXY from tests.vcr.vcr_filter import before_record_request, before_record_response @@ -17,5 +19,37 @@ def vcr_config(): "X-OpenAI-Client-User-Agent", "User-Agent", ], - "match_on": ["method", "uri", "body"], + "match_on": ["method", "body"], } + + +def patch_api_base(requestor): + new_api_base = f"{PROXY}/v1" + requestor.api_base = new_api_base + return requestor + + +@pytest.fixture +def patched_api_requestor(mocker): + original_init = openai.api_requestor.APIRequestor.__init__ + original_validate_headers = openai.api_requestor.APIRequestor._validate_headers + + def patched_init(requestor, *args, **kwargs): + original_init(requestor, *args, **kwargs) + patch_api_base(requestor) + + def patched_validate_headers(self, supplied_headers): + headers = original_validate_headers(self, supplied_headers) + headers["AGENT-MODE"] = os.environ.get("AGENT_MODE") + headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE") + return headers + + if PROXY: + mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init) + 
mocker.patch.object( + openai.api_requestor.APIRequestor, + "_validate_headers", + new=patched_validate_headers, + ) + + return mocker diff --git a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml index 42160ea2..4aefdb23 100644 --- a/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml +++ b/tests/integration/goal_oriented/cassettes/test_write_file/test_write_file.yaml @@ -573,4 +573,120 @@ interactions: status: code: 200 message: OK +- request: + body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You + are write_to_file-GPT, an AI designed to use the write_to_file command to write + ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete + command to complete the task.\nYour decisions must always be made independently + without seeking user assistance. Play to your strengths as an LLM and pursue + simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file + command to write ''Hello World'' into a file named \"hello_world.txt\".\n2. + Use the task_complete command to complete the task.\n3. Do not use any other + commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your + short term memory is short, so immediately save important information to files.\n2. + If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember.\n3. No user assistance\n4. + Exclusively use the commands listed in double quote e.g. \"command name\"\n\nCommands:\n1. + append_to_file: Append to file, args: \"filename\": \"\", \"text\": + \"\"\n2. delete_file: Delete file, args: \"filename\": \"\"\n3. + list_files: List Files in Directory, args: \"directory\": \"\"\n4. + read_file: Read file, args: \"filename\": \"\"\n5. 
write_to_file: + Write to file, args: \"filename\": \"\", \"text\": \"\"\n6. + delete_agent: Delete GPT Agent, args: \"key\": \"\"\n7. get_hyperlinks: + Get text summary, args: \"url\": \"\"\n8. get_text_summary: Get text summary, + args: \"url\": \"\", \"question\": \"\"\n9. list_agents: List + GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\": + \"\", \"message\": \"\"\n11. start_agent: Start GPT Agent, args: + \"name\": \"\", \"task\": \"\", \"prompt\": \"\"\n12. + task_complete: Task Complete (Shutdown), args: \"reason\": \"\"\n\nResources:\n1. + Internet access for searches and information gathering.\n2. Long Term memory + management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File + output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your + actions to ensure you are performing to the best of your abilities.\n2. Constructively + self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions + and strategies to refine your approach.\n4. Every command has a cost, so be + smart and efficient. Aim to complete tasks in the least number of steps.\n5. 
+ Write all code to a file.\n\nYou should only respond in JSON format as described + below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\": + \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n- + long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\": + \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\": + \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n} + \nEnsure the response can be parsed by Python json.loads"}, {"role": "system", + "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role": + "user", "content": "Determine which next command to use, and respond using the + format specified above:"}], "temperature": 0, "max_tokens": 0}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '3405' + Content-Type: + - application/json + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA7yTT4/TMBDF73yK0Vx6cauUqt1trmhhK8QBBEKIoMrrTBvT2BPsCVu2yndfJemf + 3SBOCK7zxu/9xh4f0OaYoim0GFeV46vX7u3i183N/Geup/7Lq9vVh/fXEt49PLwpNCrku+9k5Hhi + YthVJYlljwpNIC2UYzpdXM+Wy3kySxQ6zqnEFLeVjGeT+VjqcMfjZJZMUWEd9ZYwPWAV2FWyFt6R + j5heLRKFF+9z/WUyVSgsujyXlotpo9AUbA1FTL8e0FE82QYuCVPUMdoo2ksLyV7ItwMcMg8AkKEU + XG8LiRmmcCweBdpLW8xwBZ4oB2GoI4EUBPfBCq2F1xtbEhh2TvuuoRNgdEtlyfCZQ5mPwHph0NC1 + eu0oh1HR6uv7Vp/IXkaTDNXT7EA6srd+2wN8LAhExx0E+lHbQBEc/UWago72j3PY2ImOo4CuqsBV + sFoINhxAilbVcTdErkrte9oxfPpP12SCFWtsdMN3Ih/r0DJogdX51QyHQEYuEf090F4uTMJda9sy + TIsV6d0p6d6W5b9chz64Uac1PZr+tqWtQ8/0DGKArsN2uOC90PZeLAYcz0yGn+LJTCfajvgInvkG + G4Ub620s1v0+Y4pRuEKF1ue0xzRpvjUvHgEAAP//AwDSj7qBhAQAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 7c6c3f8bcdd1cf87-SJC + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 13 May 2023 16:24:06 
GMT + Server: + - cloudflare + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400, h3-29=":443"; ma=86400 + openai-model: + - gpt-3.5-turbo-0301 + openai-organization: + - user-adtx4fhfg1qsiyzdoaxciooj + openai-processing-ms: + - '16269' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15724800; includeSubDomains + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '86496' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 2.336s + x-request-id: + - 8d3e6826e88e77fb2cbce01166ddc550 + status: + code: 200 + message: OK version: 1 diff --git a/tests/integration/goal_oriented/test_browse_website.py b/tests/integration/goal_oriented/test_browse_website.py index ca433d80..3ce85689 100644 --- a/tests/integration/goal_oriented/test_browse_website.py +++ b/tests/integration/goal_oriented/test_browse_website.py @@ -8,7 +8,7 @@ from tests.utils import requires_api_key @requires_api_key("OPENAI_API_KEY") @pytest.mark.vcr -def test_browse_website(browser_agent: Agent) -> None: +def test_browse_website(browser_agent: Agent, patched_api_requestor) -> None: file_path = browser_agent.workspace.get_path("browse_website.txt") try: run_interaction_loop(browser_agent, 120) diff --git a/tests/integration/goal_oriented/test_write_file.py b/tests/integration/goal_oriented/test_write_file.py index da67235a..55db3f4a 100644 --- a/tests/integration/goal_oriented/test_write_file.py +++ b/tests/integration/goal_oriented/test_write_file.py @@ -1,3 +1,6 @@ +import os + +import openai import pytest from autogpt.agent import Agent @@ -8,10 +11,10 @@ from tests.utils import requires_api_key @requires_api_key("OPENAI_API_KEY") @pytest.mark.vcr -def test_write_file(writer_agent: Agent) -> None: +def test_write_file(writer_agent: Agent, patched_api_requestor) -> None: file_path = 
str(writer_agent.workspace.get_path("hello_world.txt")) try: - run_interaction_loop(writer_agent, 40) + run_interaction_loop(writer_agent, 200) # catch system exit exceptions except SystemExit: # the agent returns an exception when it shuts down content = read_file(file_path) diff --git a/tests/unit/test_commands.py b/tests/integration/test_commands.py similarity index 92% rename from tests/unit/test_commands.py rename to tests/integration/test_commands.py index e3b874fb..59f63857 100644 --- a/tests/unit/test_commands.py +++ b/tests/integration/test_commands.py @@ -10,11 +10,12 @@ from tests.utils import requires_api_key @pytest.mark.vcr @pytest.mark.integration_test @requires_api_key("OPENAI_API_KEY") -def test_make_agent() -> None: +def test_make_agent(patched_api_requestor) -> None: """Test that an agent can be created""" # Use the mock agent manager to avoid creating a real agent with patch("openai.ChatCompletion.create") as mock: response = MagicMock() + # del response.error response.choices[0].messages[0].content = "Test message" response.usage.prompt_tokens = 1 response.usage.completion_tokens = 1 diff --git a/tests/integration/test_llm_utils.py b/tests/integration/test_llm_utils.py index 553d3699..fefc239c 100644 --- a/tests/integration/test_llm_utils.py +++ b/tests/integration/test_llm_utils.py @@ -41,7 +41,10 @@ def spy_create_embedding(mocker: MockerFixture): @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") def test_get_ada_embedding( - config: Config, api_manager: ApiManager, spy_create_embedding: MagicMock + config: Config, + api_manager: ApiManager, + spy_create_embedding: MagicMock, + patched_api_requestor, ): token_cost = COSTS[config.embedding_model]["prompt"] llm_utils.get_ada_embedding("test") diff --git a/tests/integration/test_local_cache.py b/tests/integration/test_local_cache.py index 5200e026..808f119a 100644 --- a/tests/integration/test_local_cache.py +++ b/tests/integration/test_local_cache.py @@ -91,7 +91,7 @@ def test_get(LocalCache, 
config, mock_embed_with_ada): @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") -def test_get_relevant(LocalCache, config) -> None: +def test_get_relevant(LocalCache, config, patched_api_requestor) -> None: cache = LocalCache(config) text1 = "Sample text 1" text2 = "Sample text 2" diff --git a/tests/integration/test_memory_management.py b/tests/integration/test_memory_management.py index c9ab9fc9..22ade7b0 100644 --- a/tests/integration/test_memory_management.py +++ b/tests/integration/test_memory_management.py @@ -52,7 +52,10 @@ Human Feedback:Command Result: Important Information.""" @requires_api_key("OPENAI_API_KEY") @pytest.mark.vcr def test_save_memory_trimmed_from_context_window( - message_history_fixture, expected_permanent_memory, config: Config + message_history_fixture, + expected_permanent_memory, + config: Config, + patched_api_requestor, ): next_message_to_add_index = len(message_history_fixture) - 1 memory = get_memory(config, init=True) diff --git a/tests/integration/test_setup.py b/tests/integration/test_setup.py index b649bb14..444d9474 100644 --- a/tests/integration/test_setup.py +++ b/tests/integration/test_setup.py @@ -13,7 +13,7 @@ from tests.utils import requires_api_key @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") -def test_generate_aiconfig_automatic_default(): +def test_generate_aiconfig_automatic_default(patched_api_requestor): user_inputs = [""] with patch("builtins.input", side_effect=user_inputs): ai_config = prompt_user() @@ -26,7 +26,7 @@ def test_generate_aiconfig_automatic_default(): @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") -def test_generate_aiconfig_automatic_typical(): +def test_generate_aiconfig_automatic_typical(patched_api_requestor): user_prompt = "Help me create a rock opera about cybernetic giraffes" ai_config = generate_aiconfig_automatic(user_prompt) @@ -38,7 +38,7 @@ def test_generate_aiconfig_automatic_typical(): @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") -def 
test_generate_aiconfig_automatic_fallback(): +def test_generate_aiconfig_automatic_fallback(patched_api_requestor): user_inputs = [ "T&GF£OIBECC()!*", "Chef-GPT", @@ -59,7 +59,7 @@ def test_generate_aiconfig_automatic_fallback(): @pytest.mark.vcr @requires_api_key("OPENAI_API_KEY") -def test_prompt_user_manual_mode(): +def test_prompt_user_manual_mode(patched_api_requestor): user_inputs = [ "--manual", "Chef-GPT", diff --git a/tests/test_api_manager.py b/tests/test_api_manager.py index ba64a72f..3d0672c1 100644 --- a/tests/test_api_manager.py +++ b/tests/test_api_manager.py @@ -39,6 +39,7 @@ class TestApiManager: with patch("openai.ChatCompletion.create") as mock_create: mock_response = MagicMock() + del mock_response.error mock_response.usage.prompt_tokens = 10 mock_response.usage.completion_tokens = 20 mock_create.return_value = mock_response @@ -55,6 +56,7 @@ class TestApiManager: with patch("openai.ChatCompletion.create") as mock_create: mock_response = MagicMock() + del mock_response.error mock_response.usage.prompt_tokens = 0 mock_response.usage.completion_tokens = 0 mock_create.return_value = mock_response @@ -76,6 +78,7 @@ class TestApiManager: with patch("openai.ChatCompletion.create") as mock_create: mock_response = MagicMock() + del mock_response.error mock_response.usage.prompt_tokens = 10 mock_response.usage.completion_tokens = 20 mock_create.return_value = mock_response diff --git a/tests/test_image_gen.py b/tests/test_image_gen.py index b4eb99e0..136fb510 100644 --- a/tests/test_image_gen.py +++ b/tests/test_image_gen.py @@ -19,7 +19,7 @@ def image_size(request): reason="The image is too big to be put in a cassette for a CI pipeline. We're looking into a solution." 
) @requires_api_key("OPENAI_API_KEY") -def test_dalle(config, workspace, image_size): +def test_dalle(config, workspace, image_size, patched_api_requestor): """Test DALL-E image generation.""" generate_and_validate( config, diff --git a/tests/utils.py b/tests/utils.py index 2a0d25d8..2603dfe4 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -24,11 +24,11 @@ def requires_api_key(env_var): def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - if not os.environ.get(env_var) and env_var == "OPENAI_API_KEY": - with dummy_openai_api_key(): - return func(*args, **kwargs) - else: - return func(*args, **kwargs) + if env_var == "OPENAI_API_KEY": + if not os.environ.get(env_var) and env_var == "OPENAI_API_KEY": + with dummy_openai_api_key(): + return func(*args, **kwargs) + return func(*args, **kwargs) return wrapper diff --git a/tests/vcr/vcr_filter.py b/tests/vcr/vcr_filter.py index 892b8021..3a58bee9 100644 --- a/tests/vcr/vcr_filter.py +++ b/tests/vcr/vcr_filter.py @@ -1,7 +1,10 @@ import json +import os import re from typing import Any, Dict, List +from tests.conftest import PROXY + REPLACEMENTS: List[Dict[str, str]] = [ { "regex": r"\w{3} \w{3} {1,2}\d{1,2} \d{2}:\d{2}:\d{2} \d{4}", @@ -13,6 +16,19 @@ REPLACEMENTS: List[Dict[str, str]] = [ }, ] +ALLOWED_HOSTNAMES: List[str] = [ + "api.openai.com", + "localhost:50337", +] + +if PROXY: + ALLOWED_HOSTNAMES.append(PROXY) + ORIGINAL_URL = PROXY +else: + ORIGINAL_URL = "no_ci" + +NEW_URL = "api.openai.com" + def replace_message_content(content: str, replacements: List[Dict[str, str]]) -> str: for replacement in replacements: @@ -53,6 +69,8 @@ def before_record_response(response: Dict[str, Any]) -> Dict[str, Any]: def before_record_request(request: Any) -> Any: + request = replace_request_hostname(request, ORIGINAL_URL, NEW_URL) + filtered_request = filter_hostnames(request) filtered_request_without_dynamic_data = replace_timestamp_in_request( filtered_request @@ -60,14 +78,24 @@ def 
before_record_request(request: Any) -> Any: return filtered_request_without_dynamic_data -def filter_hostnames(request: Any) -> Any: - allowed_hostnames: List[str] = [ - "api.openai.com", - "localhost:50337", - ] +from urllib.parse import urlparse, urlunparse + +def replace_request_hostname(request: Any, original_url: str, new_hostname: str) -> Any: + parsed_url = urlparse(request.uri) + + if parsed_url.hostname in original_url: + new_path = parsed_url.path.replace("/proxy_function", "") + request.uri = urlunparse( + parsed_url._replace(netloc=new_hostname, path=new_path, scheme="https") + ) + + return request + + +def filter_hostnames(request: Any) -> Any: # Add your implementation here for filtering hostnames - if any(hostname in request.url for hostname in allowed_hostnames): + if any(hostname in request.url for hostname in ALLOWED_HOSTNAMES): return request else: return None From a110ff94a5fe8173dd938c7e40d338abdc14230b Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 13 May 2023 16:25:56 -0700 Subject: [PATCH 51/56] test new CI (#4168) * test new CI * test new CI * remove double quotes --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8830775..ac931aa3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,6 +22,7 @@ jobs: with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.ref }} + repository: / - name: Set up Python ${{ env.min-python-version }} uses: actions/setup-python@v2 @@ -64,6 +65,7 @@ jobs: with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.ref }} + repository: / - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 From de6b8ee9f291cc066dafbab92683e51d14e0a021 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 13 May 2023 16:40:31 -0700 Subject: [PATCH 52/56] Feature/test new ci pipeline 2 (#4169) * test new CI * remove double quotes * make it a variable * make it a variable --- 
.github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ac931aa3..490837fd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.ref }} - repository: / + repository: ${{ github.event.pull_request.head.repo.full_name }} - name: Set up Python ${{ env.min-python-version }} uses: actions/setup-python@v2 @@ -65,7 +65,7 @@ jobs: with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.ref }} - repository: / + repository: ${{ github.event.pull_request.head.repo.full_name }} - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 From 2d9b9294d08aad063d8b873f1f201ef4c8b39081 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 13 May 2023 18:18:31 -0700 Subject: [PATCH 53/56] Test New CI Pipeline (#4170) * introduce dummy prompt change * introduce dummy prompt change * empty commit * empty commit * empty commit * push to origin repo * add s to quote --- .github/workflows/ci.yml | 4 ++-- autogpt/llm/llm_utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 490837fd..ecd6bdb5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -97,5 +97,5 @@ jobs: git config user.name "GitHub Actions" git commit -m "Add new cassettes" git checkout -b cassette-diff-${{ github.event.pull_request.number }} - git push -f origin cassette-diff-${{ github.event.pull_request.number }} - echo "COMMIT_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV + git remote add target https://${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.event.pull_request.base.repo.full_name }} + git push -f target cassette-diff-${{ github.event.pull_request.number }} diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py index 58b19735..a77bccbc 100644 --- a/autogpt/llm/llm_utils.py +++ 
b/autogpt/llm/llm_utils.py @@ -181,7 +181,7 @@ def create_chat_completion( ) warned_user = True except (APIError, Timeout) as e: - if e.http_status != 502 : + if e.http_status != 502: raise if attempt == num_retries - 1: raise From bc6f8a27ffa7e2e2419046177a0584bcfae954bb Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 13 May 2023 19:31:24 -0700 Subject: [PATCH 54/56] Feature/fix rate limiting issue Step 1 (#4173) * temporarilly remove 3.11 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ecd6bdb5..684bd117 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.10", "3.11"] + python-version: ["3.10"] steps: - name: Check out repository From 15ebe23bc2720a5a822a62a50301998370668d69 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sun, 14 May 2023 06:20:14 -0700 Subject: [PATCH 55/56] add back 3.11 (#4185) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 684bd117..ecd6bdb5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.10"] + python-version: ["3.10", "3.11"] steps: - name: Check out repository From f778483ac37adcd2f2b6ccc0dc7e7466fb6e9a06 Mon Sep 17 00:00:00 2001 From: k-boikov <64261260+k-boikov@users.noreply.github.com> Date: Sun, 14 May 2023 16:30:10 +0300 Subject: [PATCH 56/56] Revert "Put back 3.11 until it's removed as a requirement" (#4191) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ecd6bdb5..684bd117 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: 
- python-version: ["3.10", "3.11"] + python-version: ["3.10"] steps: - name: Check out repository