mirror of
https://github.com/aljazceru/Tutorial-Codebase-Knowledge.git
synced 2026-02-06 23:14:26 +01:00
update the llm setup
This commit is contained in:
@@ -70,7 +70,12 @@ This is a tutorial project of [Pocket Flow](https://github.com/The-Pocket/Pocket
|
|||||||
pip install -r requirements.txt
|
pip install -r requirements.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Generate a complete codebase tutorial by running the main script:
|
3. Set up LLM in [`utils/call_llm.py`](./utils/call_llm.py) by providing credentials (API key or project name). We highly recommend the latest models with thinking capabilities (Gemini Pro 2.5, Claude 3.7 with thinking, O1). You can verify if it is correctly set up by running:
|
||||||
|
```bash
|
||||||
|
python utils/call_llm.py
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Generate a complete codebase tutorial by running the main script:
|
||||||
```bash
|
```bash
|
||||||
python main.py https://github.com/username/repo --include "*.py" "*.js" --exclude "tests/*" --max-size 50000
|
python main.py https://github.com/username/repo --include "*.py" "*.js" --exclude "tests/*" --max-size 50000
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ logger.addHandler(file_handler)
|
|||||||
# Simple cache configuration
|
# Simple cache configuration
|
||||||
cache_file = "llm_cache.json"
|
cache_file = "llm_cache.json"
|
||||||
|
|
||||||
|
# By default, we use Google Gemini 2.5 Pro, as it shows great performance for code understanding
|
||||||
def call_llm(prompt: str, use_cache: bool = True) -> str:
|
def call_llm(prompt: str, use_cache: bool = True) -> str:
|
||||||
# Log the prompt
|
# Log the prompt
|
||||||
logger.info(f"PROMPT: {prompt}")
|
logger.info(f"PROMPT: {prompt}")
|
||||||
@@ -43,6 +44,7 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
|
|||||||
# Call the LLM if not in cache or cache disabled
|
# Call the LLM if not in cache or cache disabled
|
||||||
client = genai.Client(
|
client = genai.Client(
|
||||||
vertexai=True,
|
vertexai=True,
|
||||||
|
# TODO: change to your own project id and location
|
||||||
project=os.getenv("GEMINI_PROJECT_ID", "your-project-id"),
|
project=os.getenv("GEMINI_PROJECT_ID", "your-project-id"),
|
||||||
location=os.getenv("GEMINI_LOCATION", "us-central1")
|
location=os.getenv("GEMINI_LOCATION", "us-central1")
|
||||||
)
|
)
|
||||||
@@ -77,6 +79,38 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
|
|||||||
|
|
||||||
return response_text
|
return response_text
|
||||||
|
|
||||||
|
# # Use Anthropic Claude 3.7 Sonnet Extended Thinking
|
||||||
|
# def call_llm(prompt, use_cache: bool = True):
|
||||||
|
# from anthropic import Anthropic
|
||||||
|
# client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
|
||||||
|
# response = client.messages.create(
|
||||||
|
# model="claude-3-7-sonnet-20250219",
|
||||||
|
# max_tokens=21000,
|
||||||
|
# thinking={
|
||||||
|
# "type": "enabled",
|
||||||
|
# "budget_tokens": 20000
|
||||||
|
# },
|
||||||
|
# messages=[
|
||||||
|
# {"role": "user", "content": prompt}
|
||||||
|
# ]
|
||||||
|
# )
|
||||||
|
# return response.content[1].text
|
||||||
|
|
||||||
|
# # Use OpenAI o1
|
||||||
|
# def call_llm(prompt, use_cache: bool = True):
|
||||||
|
# from openai import OpenAI
|
||||||
|
# client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
|
||||||
|
# r = client.chat.completions.create(
|
||||||
|
# model="o1",
|
||||||
|
# messages=[{"role": "user", "content": prompt}],
|
||||||
|
# response_format={
|
||||||
|
# "type": "text"
|
||||||
|
# },
|
||||||
|
# reasoning_effort="medium",
|
||||||
|
# store=False
|
||||||
|
# )
|
||||||
|
# return r.choices[0].message.content
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
test_prompt = "Hello, how are you?"
|
test_prompt = "Hello, how are you?"
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user