update the llm setup
README.md
@@ -70,18 +70,23 @@ This is a tutorial project of [Pocket Flow](https://github.com/The-Pocket/Pocket
    pip install -r requirements.txt
    ```
 
-3. Generate a complete codebase tutorial by running the main script:
-
-   ```bash
-   python main.py https://github.com/username/repo --include "*.py" "*.js" --exclude "tests/*" --max-size 50000
-   ```
-
-   - `repo_url` - URL of the GitHub repository (required)
-   - `-n, --name` - Project name (optional, derived from URL if omitted)
-   - `-t, --token` - GitHub token (or set GITHUB_TOKEN environment variable)
-   - `-o, --output` - Output directory (default: ./output)
-   - `-i, --include` - Files to include (e.g., "*.py" "*.js")
-   - `-e, --exclude` - Files to exclude (e.g., "tests/*" "docs/*")
-   - `-s, --max-size` - Maximum file size in bytes (default: 100KB)
+3. Set up the LLM in [`utils/call_llm.py`](./utils/call_llm.py) by providing credentials (an API key or project name). We highly recommend the latest models with thinking capabilities (Gemini 2.5 Pro, Claude 3.7 Sonnet with extended thinking, OpenAI o1). You can verify that it is set up correctly by running:
+
+   ```bash
+   python utils/call_llm.py
+   ```
+
+4. Generate a complete codebase tutorial by running the main script:
+
+   ```bash
+   python main.py https://github.com/username/repo --include "*.py" "*.js" --exclude "tests/*" --max-size 50000
+   ```
+
+   - `repo_url` - URL of the GitHub repository (required)
+   - `-n, --name` - Project name (optional, derived from URL if omitted)
+   - `-t, --token` - GitHub token (or set GITHUB_TOKEN environment variable)
+   - `-o, --output` - Output directory (default: ./output)
+   - `-i, --include` - Files to include (e.g., "*.py" "*.js")
+   - `-e, --exclude` - Files to exclude (e.g., "tests/*" "docs/*")
+   - `-s, --max-size` - Maximum file size in bytes (default: 100KB)
 
 The application will crawl the repository, analyze the codebase structure, generate tutorial content, and save the output in the specified directory (default: ./output).
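The `--include`/`--exclude` options take shell-style glob patterns, and `--max-size` caps file size in bytes. As a rough illustration of how such filtering typically behaves (a hypothetical sketch, not the project's actual crawler code; the helper name and defaults are made up):

```python
import fnmatch
import os

def should_keep(path: str, size: int,
                include=("*.py", "*.js"),
                exclude=("tests/*",),
                max_size=50000) -> bool:
    """Hypothetical filter mirroring --include/--exclude/--max-size."""
    if size > max_size:  # drop files over the byte cap
        return False
    if any(fnmatch.fnmatch(path, pat) for pat in exclude):
        return False  # path matches an exclude pattern
    name = os.path.basename(path)
    return any(fnmatch.fnmatch(name, pat) for pat in include)

print(should_keep("src/main.py", 1200))      # True
print(should_keep("tests/test_app.py", 800)) # False: excluded by "tests/*"
```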
utils/call_llm.py
@@ -20,6 +20,7 @@ logger.addHandler(file_handler)
 # Simple cache configuration
 cache_file = "llm_cache.json"
 
+# By default, we use Google Gemini 2.5 Pro, as it shows great performance for code understanding
 def call_llm(prompt: str, use_cache: bool = True) -> str:
     # Log the prompt
     logger.info(f"PROMPT: {prompt}")
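The hunk above only shows the cache filename; the lookup logic lives outside it. For orientation, a simple JSON prompt cache of this shape usually works along these lines (an assumed sketch, not the repository's exact code):

```python
import json
import os

cache_file = "llm_cache.json"

def load_cache() -> dict:
    # Return the cached prompt -> response map, or an empty one.
    if os.path.exists(cache_file):
        with open(cache_file) as f:
            return json.load(f)
    return {}

def save_cache(cache: dict) -> None:
    # Persist the map so repeated prompts skip the API call.
    with open(cache_file, "w") as f:
        json.dump(cache, f)

# Inside call_llm, before hitting the API:
#     cache = load_cache()
#     if use_cache and prompt in cache:
#         return cache[prompt]
```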
@@ -43,6 +44,7 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
     # Call the LLM if not in cache or cache disabled
     client = genai.Client(
         vertexai=True,
+        # TODO: change to your own project id and location
         project=os.getenv("GEMINI_PROJECT_ID", "your-project-id"),
         location=os.getenv("GEMINI_LOCATION", "us-central1")
     )
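For context, a `genai.Client` configured this way is normally used roughly as follows with the google-genai SDK; the generation call sits outside this hunk, and the model id here is an assumption based on the comment above:

```python
# Assumed continuation (not shown in the diff): ask the Vertex-backed
# client for a completion and read back the text.
response = client.models.generate_content(
    model="gemini-2.5-pro",  # model id assumed from the Gemini 2.5 Pro comment
    contents=prompt,
)
response_text = response.text
```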
@@ -77,6 +79,38 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
 
     return response_text
 
+# # Use Anthropic Claude 3.7 Sonnet Extended Thinking
+# def call_llm(prompt, use_cache: bool = True):
+#     from anthropic import Anthropic
+#     client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
+#     response = client.messages.create(
+#         model="claude-3-7-sonnet-20250219",
+#         max_tokens=21000,
+#         thinking={
+#             "type": "enabled",
+#             "budget_tokens": 20000
+#         },
+#         messages=[
+#             {"role": "user", "content": prompt}
+#         ]
+#     )
+#     return response.content[1].text
+
+# # Use OpenAI o1
+# def call_llm(prompt, use_cache: bool = True):
+#     from openai import OpenAI
+#     client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
+#     r = client.chat.completions.create(
+#         model="o1",
+#         messages=[{"role": "user", "content": prompt}],
+#         response_format={
+#             "type": "text"
+#         },
+#         reasoning_effort="medium",
+#         store=False
+#     )
+#     return r.choices[0].message.content
+
 
 if __name__ == "__main__":
     test_prompt = "Hello, how are you?"
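Two details of the commented-out alternatives are worth noting. With extended thinking enabled, Claude's `response.content` begins with a thinking block, which is why the final answer is read from `response.content[1]`; selecting the block by type is slightly more robust (a hedged sketch, not part of the commit):

```python
# Hedged sketch: pick the answer block by type rather than by position.
final_text = next(block.text for block in response.content if block.type == "text")
```

In the o1 variant, `reasoning_effort` accepts "low", "medium", or "high", trading latency and cost for deeper reasoning, and `store=False` asks OpenAI not to retain the completion.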