mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-01 13:24:22 +01:00
Sync release v0.4.4 + stable back into master (#4947)
This commit is contained in:
13
.github/workflows/ci.yml
vendored
13
.github/workflows/ci.yml
vendored
@@ -108,22 +108,27 @@ jobs:
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
|
||||
cassette_base_branch="${{ github.event.pull_request.base.ref }}"
|
||||
cd tests/Auto-GPT-test-cassettes
|
||||
|
||||
if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
|
||||
cassette_base_branch="master"
|
||||
fi
|
||||
|
||||
if git ls-remote --exit-code --heads origin $cassette_branch ; then
|
||||
git fetch origin $cassette_branch
|
||||
git fetch origin ${{ github.event.pull_request.base.ref }}
|
||||
git fetch origin $cassette_base_branch
|
||||
|
||||
git checkout $cassette_branch
|
||||
|
||||
# Pick non-conflicting cassette updates from the base branch
|
||||
git merge --no-commit --strategy-option=ours origin/${{ github.event.pull_request.base.ref }}
|
||||
git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
|
||||
echo "Using cassettes from mirror branch '$cassette_branch'," \
|
||||
"synced to upstream branch '${{ github.event.pull_request.base.ref }}'."
|
||||
"synced to upstream branch '$cassette_base_branch'."
|
||||
else
|
||||
git checkout -b $cassette_branch
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
|
||||
"Using cassettes from '${{ github.event.pull_request.base.ref }}'."
|
||||
"Using cassettes from '$cassette_base_branch'."
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
|
||||
43
BULLETIN.md
43
BULLETIN.md
@@ -1,22 +1,29 @@
|
||||
# Website and Documentation Site 📰📖
|
||||
Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
|
||||
The documentation also has a place here, at *https://docs.agpt.co*
|
||||
# QUICK LINKS 🔗
|
||||
# --------------
|
||||
🌎 *Official Website*: https://agpt.co.
|
||||
📖 *User Guide*: https://docs.agpt.co.
|
||||
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
|
||||
|
||||
# For contributors 👷🏼
|
||||
Since releasing v0.3.0, we have been working on re-architecting the Auto-GPT core to make it more extensible and make room for structural performance-oriented R&D.
|
||||
# v0.4.4 RELEASE HIGHLIGHTS! 🚀
|
||||
# -----------------------------
|
||||
## GPT-4 is back!
|
||||
Following OpenAI's recent GPT-4 GA announcement, the SMART_LLM .env setting
|
||||
now defaults to GPT-4, and Auto-GPT will use GPT-4 by default in its main loop.
|
||||
|
||||
Check out the contribution guide on our wiki:
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
|
||||
### !! High Costs Warning !! 💰💀🚨
|
||||
GPT-4 costs ~20x more than GPT-3.5-turbo.
|
||||
Please take note of this before using SMART_LLM. You can use `--gpt3only`
|
||||
or `--gpt4only` to force the use of GPT-3.5-turbo or GPT-4, respectively,
|
||||
at runtime.
|
||||
|
||||
# 🚀 v0.4.3 Release 🚀
|
||||
We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution,
|
||||
extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork
|
||||
for future compatibility with OpenAI's function calling feature.
|
||||
## Re-arch v1 preview release!
|
||||
We've released a preview version of the re-arch code, under `autogpt/core`.
|
||||
This is a major milestone for us, and we're excited to continue working on it.
|
||||
We look forward to your feedback. Follow the process here:
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/issues/4770.
|
||||
|
||||
Key Highlights:
|
||||
- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided.
|
||||
- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further.
|
||||
- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution.
|
||||
- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests
|
||||
## Take a look at the Release Notes on Github for the full changelog!
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/releases
|
||||
## Other highlights
|
||||
Other fixes include plugins regressions, Azure config and security patches.
|
||||
|
||||
Take a look at the Release Notes on Github for the full changelog!
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/releases.
|
||||
|
||||
@@ -144,7 +144,18 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
|
||||
), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
|
||||
return p
|
||||
|
||||
def get_azure_kwargs(self, model: str) -> dict[str, str]:
|
||||
def get_openai_credentials(self, model: str) -> dict[str, str]:
|
||||
credentials = {
|
||||
"api_key": self.openai_api_key,
|
||||
"api_base": self.openai_api_base,
|
||||
"organization": self.openai_organization,
|
||||
}
|
||||
if self.use_azure:
|
||||
azure_credentials = self.get_azure_credentials(model)
|
||||
credentials.update(azure_credentials)
|
||||
return credentials
|
||||
|
||||
def get_azure_credentials(self, model: str) -> dict[str, str]:
|
||||
"""Get the kwargs for the Azure API."""
|
||||
|
||||
# Fix --gpt3only and --gpt4only in combination with Azure
|
||||
|
||||
@@ -78,17 +78,14 @@ def create_text_completion(
|
||||
if temperature is None:
|
||||
temperature = config.temperature
|
||||
|
||||
if config.use_azure:
|
||||
kwargs = config.get_azure_kwargs(model)
|
||||
else:
|
||||
kwargs = {"model": model}
|
||||
kwargs = {"model": model}
|
||||
kwargs.update(config.get_openai_credentials(model))
|
||||
|
||||
response = iopenai.create_text_completion(
|
||||
prompt=prompt,
|
||||
**kwargs,
|
||||
temperature=temperature,
|
||||
max_tokens=max_output_tokens,
|
||||
api_key=config.openai_api_key,
|
||||
)
|
||||
logger.debug(f"Response: {response}")
|
||||
|
||||
@@ -150,9 +147,7 @@ def create_chat_completion(
|
||||
if message is not None:
|
||||
return message
|
||||
|
||||
chat_completion_kwargs["api_key"] = config.openai_api_key
|
||||
if config.use_azure:
|
||||
chat_completion_kwargs.update(config.get_azure_kwargs(model))
|
||||
chat_completion_kwargs.update(config.get_openai_credentials(model))
|
||||
|
||||
if functions:
|
||||
chat_completion_kwargs["functions"] = [
|
||||
@@ -196,12 +191,7 @@ def check_model(
|
||||
config: Config,
|
||||
) -> str:
|
||||
"""Check if model is available for use. If not, return gpt-3.5-turbo."""
|
||||
openai_credentials = {
|
||||
"api_key": config.openai_api_key,
|
||||
}
|
||||
if config.use_azure:
|
||||
openai_credentials.update(config.get_azure_kwargs(model_name))
|
||||
|
||||
openai_credentials = config.get_openai_credentials(model_name)
|
||||
api_manager = ApiManager()
|
||||
models = api_manager.get_models(**openai_credentials)
|
||||
|
||||
|
||||
@@ -41,10 +41,8 @@ def get_embedding(
|
||||
input = [text.replace("\n", " ") for text in input]
|
||||
|
||||
model = config.embedding_model
|
||||
if config.use_azure:
|
||||
kwargs = config.get_azure_kwargs(model)
|
||||
else:
|
||||
kwargs = {"model": model}
|
||||
kwargs = {"model": model}
|
||||
kwargs.update(config.get_openai_credentials(model))
|
||||
|
||||
logger.debug(
|
||||
f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
|
||||
@@ -57,7 +55,6 @@ def get_embedding(
|
||||
embeddings = iopenai.create_embedding(
|
||||
input,
|
||||
**kwargs,
|
||||
api_key=config.openai_api_key,
|
||||
).data
|
||||
|
||||
if not multiple:
|
||||
|
||||
@@ -137,7 +137,6 @@ def summarize_text(
|
||||
logger.info(f"Summarized {len(chunks)} chunks")
|
||||
|
||||
summary, _ = summarize_text("\n\n".join(summaries), config)
|
||||
|
||||
return summary.strip(), [
|
||||
(summaries[i], chunks[i][0]) for i in range(0, len(chunks))
|
||||
]
|
||||
|
||||
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
||||
|
||||
[project]
|
||||
name = "agpt"
|
||||
version = "0.4.3"
|
||||
version = "0.4.4"
|
||||
authors = [
|
||||
{ name="Torantulino", email="support@agpt.co" },
|
||||
]
|
||||
|
||||
@@ -174,18 +174,32 @@ azure_model_map:
|
||||
|
||||
fast_llm = config.fast_llm
|
||||
smart_llm = config.smart_llm
|
||||
assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "FAST-LLM_ID"
|
||||
assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "SMART-LLM_ID"
|
||||
assert (
|
||||
config.get_azure_credentials(config.fast_llm)["deployment_id"] == "FAST-LLM_ID"
|
||||
)
|
||||
assert (
|
||||
config.get_azure_credentials(config.smart_llm)["deployment_id"]
|
||||
== "SMART-LLM_ID"
|
||||
)
|
||||
|
||||
# Emulate --gpt4only
|
||||
config.fast_llm = smart_llm
|
||||
assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "SMART-LLM_ID"
|
||||
assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "SMART-LLM_ID"
|
||||
assert (
|
||||
config.get_azure_credentials(config.fast_llm)["deployment_id"] == "SMART-LLM_ID"
|
||||
)
|
||||
assert (
|
||||
config.get_azure_credentials(config.smart_llm)["deployment_id"]
|
||||
== "SMART-LLM_ID"
|
||||
)
|
||||
|
||||
# Emulate --gpt3only
|
||||
config.fast_llm = config.smart_llm = fast_llm
|
||||
assert config.get_azure_kwargs(config.fast_llm)["deployment_id"] == "FAST-LLM_ID"
|
||||
assert config.get_azure_kwargs(config.smart_llm)["deployment_id"] == "FAST-LLM_ID"
|
||||
assert (
|
||||
config.get_azure_credentials(config.fast_llm)["deployment_id"] == "FAST-LLM_ID"
|
||||
)
|
||||
assert (
|
||||
config.get_azure_credentials(config.smart_llm)["deployment_id"] == "FAST-LLM_ID"
|
||||
)
|
||||
|
||||
del os.environ["USE_AZURE"]
|
||||
del os.environ["AZURE_CONFIG_FILE"]
|
||||
|
||||
Reference in New Issue
Block a user