Merge pull request #215 from slavakurilyak/security-and-robustness-improvements
Improve Security and Robustness in browse.py
@@ -2,9 +2,31 @@ import requests
 from bs4 import BeautifulSoup
 from config import Config
 from llm_utils import create_chat_completion
+from urllib.parse import urlparse, urljoin
 
 cfg = Config()
 
+# Function to check if the URL is valid
+def is_valid_url(url):
+    try:
+        result = urlparse(url)
+        return all([result.scheme, result.netloc])
+    except ValueError:
+        return False
+
+# Function to sanitize the URL
+def sanitize_url(url):
+    return urljoin(url, urlparse(url).path)
+
+# Function to make a request with a specified timeout and handle exceptions
+def make_request(url, timeout=10):
+    try:
+        response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout)
+        response.raise_for_status()
+        return response
+    except requests.exceptions.RequestException as e:
+        return "Error: " + str(e)
+
 # Define and check for local file address prefixes
 def check_local_file_access(url):
     local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
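For reference, a standalone sketch of the two pure helpers this hunk adds. They are copied from the diff so the snippet runs without Auto-GPT's Config or any network access; the sample URLs and the expected outputs in the comments are illustrative only, based on standard urllib behaviour.

# Standalone demo of is_valid_url and sanitize_url (copied from the diff above).
from urllib.parse import urlparse, urljoin

def is_valid_url(url):
    # Valid means the URL parses with both a scheme and a network location.
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False

def sanitize_url(url):
    # Re-joining the URL against its own path drops query strings and fragments.
    return urljoin(url, urlparse(url).path)

if __name__ == "__main__":
    print(is_valid_url("https://example.com/docs"))              # True
    print(is_valid_url("not-a-url"))                             # False: no scheme or netloc
    print(sanitize_url("https://example.com/docs?page=2#top"))   # https://example.com/docs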
@@ -12,7 +34,7 @@ def check_local_file_access(url):
 
 def scrape_text(url):
     """Scrape text from a webpage"""
-    # Most basic check if the URL is valid:
+    # Basic check if the URL is valid
     if not url.startswith('http'):
         return "Error: Invalid URL"
 
@@ -20,14 +42,21 @@ def scrape_text(url):
     if check_local_file_access(url):
         return "Error: Access to local files is restricted"
 
-    try:
-        response = requests.get(url, headers=cfg.user_agent_header)
-    except requests.exceptions.RequestException as e:
-        return "Error: " + str(e)
+    # Validate the input URL
+    if not is_valid_url(url):
+        # Sanitize the input URL
+        sanitized_url = sanitize_url(url)
 
-    # Check if the response contains an HTTP error
-    if response.status_code >= 400:
-        return "Error: HTTP " + str(response.status_code) + " error"
+        # Make the request with a timeout and handle exceptions
+        response = make_request(sanitized_url)
+
+        if isinstance(response, str):
+            return response
+    else:
+        # Sanitize the input URL
+        sanitized_url = sanitize_url(url)
+
+        response = requests.get(sanitized_url, headers=cfg.user_agent_header)
 
     soup = BeautifulSoup(response.text, "html.parser")
 
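A minimal offline sketch of the error-handling contract of the new make_request helper: on any requests.exceptions.RequestException it returns an error string instead of raising, which is why the updated scrape_text checks isinstance(response, str). The plain user_agent_header dict below is an assumed stand-in for cfg.user_agent_header, and the malformed URL is chosen so requests fails while preparing the request, before any network I/O.

# Offline demo of make_request's string-on-error behaviour.
import requests

user_agent_header = {"User-Agent": "Mozilla/5.0"}  # assumed stand-in for cfg.user_agent_header

def make_request(url, timeout=10):
    try:
        response = requests.get(url, headers=user_agent_header, timeout=timeout)
        response.raise_for_status()
        return response
    except requests.exceptions.RequestException as e:
        return "Error: " + str(e)

if __name__ == "__main__":
    result = make_request("http://")  # malformed URL, rejected before any network I/O
    if isinstance(result, str):
        print(result)   # e.g. "Error: Invalid URL 'http://': No host supplied"
    else:
        print(result.status_code)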