mirror of
https://github.com/aljazceru/vibeline.git
synced 2026-01-16 21:14:22 +01:00
Extract prompts to markdown files and add dynamic prompt selection based on transcript content
This commit is contained in:
14
prompts/default.md
Normal file
14
prompts/default.md
Normal file
@@ -0,0 +1,14 @@
|
||||
Please provide a concise summary of the following transcript.
|
||||
Focus on the main topics, key points, and any action items or decisions mentioned.
|
||||
Keep the summary short, clear, and well-structured.
|
||||
|
||||
Transcript:
|
||||
{transcript}
|
||||
|
||||
Summary:
12
prompts/idea_app.md
Normal file
12
prompts/idea_app.md
Normal file
@@ -0,0 +1,12 @@
|
||||
Please analyze this transcript about an app idea and provide a structured summary focusing on:
|
||||
1. The core app concept and its main purpose
|
||||
2. Key features and functionality discussed
|
||||
3. Target audience or user base
|
||||
4. Any technical considerations or implementation details
|
||||
5. Potential challenges or concerns raised
|
||||
6. Next steps or action items mentioned
|
||||
|
||||
Transcript:
|
||||
{transcript}
|
||||
|
||||
Summary:
|
||||
@@ -10,16 +10,26 @@ def read_transcript(transcript_file: Path) -> str:
|
||||
with open(transcript_file, 'r', encoding='utf-8') as f:
|
||||
return f.read()
|
||||
|
||||
def load_prompt_template(transcript_text: str, prompt_dir: Path = Path("prompts")) -> str:
    """Load the appropriate prompt template based on transcript content.

    Selects ``idea_app.md`` when the transcript mentions both "idea" and
    "app" (case-insensitive); otherwise falls back to ``default.md``.

    Args:
        transcript_text: Full text of the transcript to classify.
        prompt_dir: Directory containing the prompt template files.
            Defaults to ``prompts`` relative to the working directory,
            preserving the original behavior.

    Returns:
        The raw template text, with its ``{transcript}`` placeholder intact
        (the caller is expected to ``.format(transcript=...)`` it).

    Raises:
        FileNotFoundError: If the selected template file does not exist.
    """
    # Lowercase once instead of once per keyword test.
    lowered = transcript_text.lower()

    # Heuristic routing: app-idea transcripts get the specialized prompt.
    if "idea" in lowered and "app" in lowered:
        prompt_file = prompt_dir / "idea_app.md"
    else:
        prompt_file = prompt_dir / "default.md"

    # Path.read_text handles open/close; keep UTF-8 explicit as before.
    return prompt_file.read_text(encoding='utf-8')
|
||||
|
||||
def process_transcript(transcript_text: str) -> str:
|
||||
"""Process a transcript using LLaMA to generate a summary."""
|
||||
prompt = f"""Please provide a concise summary of the following transcript.
|
||||
Focus on the main topics, key points, and any action items or decisions mentioned.
|
||||
Keep the summary clear and well-structured.
|
||||
|
||||
Transcript:
|
||||
{transcript_text}
|
||||
|
||||
Summary:"""
|
||||
# Load the appropriate prompt template
|
||||
prompt_template = load_prompt_template(transcript_text)
|
||||
|
||||
# Format the prompt with the transcript
|
||||
prompt = prompt_template.format(transcript=transcript_text)
|
||||
|
||||
# Use Ollama to generate the summary
|
||||
response = ollama.chat(model='llama2', messages=[
|
||||
|
||||
Reference in New Issue
Block a user