from __future__ import annotations

from typing import Any, Dict, List, Optional

import requests
from bs4 import BeautifulSoup

from .base import BaseConnector, ConnectorConfig
from ..storage import Document


def _extract_text_from_html(html: str) -> str:
    """Return the visible text of an HTML document, one text fragment per line."""
    soup = BeautifulSoup(html, "html.parser")
    # Drop non-content tags before extracting text.
    for tag in soup(["script", "style", "noscript"]):
        tag.decompose()
    text = soup.get_text("\n")
    # Trim whitespace and discard empty lines.
    lines = [line.strip() for line in text.splitlines()]
    lines = [line for line in lines if line]
    return "\n".join(lines)


class HTTPConnector(BaseConnector):
    """Fetch text from a URL, with optional authentication.

    Expected ``params`` keys:
      - url: str
      - auth_type: str, one of {none, basic, bearer}
      - username / password: for basic auth
      - token: for bearer auth
      - headers: dict of extra request headers (optional)

    A minimal usage sketch appears at the end of this module.
    """

    def fetch(self) -> List[Document]:
        p: Dict[str, Any] = self.config.params
        url = p["url"]
        auth_type = (p.get("auth_type") or "none").lower()
        headers: Dict[str, str] = p.get("headers", {})

        # Build credentials from the configured auth_type.
        auth = None
        if auth_type == "basic":
            auth = (p.get("username", ""), p.get("password", ""))
        elif auth_type == "bearer":
            token = p.get("token", "")
            headers = {**headers, "Authorization": f"Bearer {token}"}

        r = requests.get(url, headers=headers, auth=auth, timeout=30)
        r.raise_for_status()

        # Strip markup from HTML responses; pass other content types through as-is.
        content_type = r.headers.get("content-type", "")
        if "html" in content_type:
            text = _extract_text_from_html(r.text)
        else:
            text = r.text

        return [Document(text=text, source=url, metadata={"content_type": content_type})]
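

# --- Usage sketch -------------------------------------------------------------
# A minimal sketch of wiring up the connector, assuming ConnectorConfig accepts
# a ``params`` mapping (its actual constructor lives in .base and is not shown
# in this file). The URL and token below are placeholders, not real endpoints.
if __name__ == "__main__":
    config = ConnectorConfig(params={
        "url": "https://example.com/docs/index.html",
        "auth_type": "bearer",  # or "none" / "basic"
        "token": "YOUR_TOKEN",
        "headers": {"Accept": "text/html"},
    })
    docs = HTTPConnector(config).fetch()
    print(docs[0].metadata["content_type"])
    print(docs[0].text[:200])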