Famous books belonging to the public domain

Thanks to "La Bibliothèque électronique du Québec", where I found the books as PDFs, already parsed and lightly cleaned.

This dataset is intended for educational purposes. I haven't read the website's terms of use, so please check them yourself to make sure you can use this dataset for your use case.

Credits to Jean-Yves Dupuis, the author of the website, and to every author of these books. Credits as well to Daniel Fromont and Marc Galli, who scanned some of these books.

Don't forget to put them on your e-reader and to read them; reading is good for you :)
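If you just want to use the dataset, here is a minimal loading sketch with the 🤗 datasets library. It assumes the repo id produced by the upload step at the end of this card (Volko76/french-classic-books); adjust it if you pushed the dataset under another name.

from datasets import load_dataset

# Repo id taken from the upload cell below; change it if needed.
ds = load_dataset("Volko76/french-classic-books", split="train")
print(ds)                    # a single 'text' column, one row per book
print(ds[0]["text"][:300])   # first 300 characters of the first book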
Google Colab script:
%pip install requests PyMuPDF beautifulsoup4
import os
import requests
import fitz # PyMuPDF
from urllib.parse import urlparse, unquote
import re
def sanitize_filename(name):
    """Remove invalid characters from filename"""
    name = re.sub(r'[<>:"/\\|?*]', '_', name)
    return name.strip()
def get_book_name_from_url(url):
    """Extract book name from URL"""
    parsed = urlparse(url)
    filename = unquote(os.path.basename(parsed.path))
    if filename.lower().endswith('.pdf'):
        filename = filename[:-4]
    return sanitize_filename(filename)
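# Quick sanity check of the two helpers above (illustrative values only; the URL points to
# one of the BeQ "vents" PDFs that is also inspected with `less` further down).
print(sanitize_filename('Un titre: avec "caractères" interdits?'))  # -> Un titre_ avec _caractères_ interdits_
print(get_book_name_from_url("https://beq.ebooksgratuits.com/vents/About-Germaine.pdf"))  # -> About-Germaine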
def download_pdf(url, temp_path):
    """Download PDF from URL"""
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    with open(temp_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    return temp_path
def is_metadata_line(line):
    """Check if a line is likely metadata (not story content)"""
    line = line.strip()
    # Empty lines - return special marker
    if not line:
        return "empty"
    # Page numbers (standalone numbers)
    if re.match(r'^\d+$', line):
        return "page_number"
    # Roman numerals alone (chapter numbers)
    if re.match(r'^[IVXLCDM]+$', line):
        return "chapter_number"
    # Chapter indicators
    if re.match(r'^(Chapitre|CHAPITRE|Chapter|CHAPTER)\s*[IVXLCDM\d]+', line, re.IGNORECASE):
        return "chapter_title"
    # Part/Tome indicators
    if re.match(r'^(Partie|PARTIE|Part|PART|Tome|TOME|Livre|LIVRE)\s*[IVXLCDM\d]+', line, re.IGNORECASE):
        return "part_title"
    return False
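# Illustrative check of the classifier above: each marker below is what clean_book_text()
# later keys on to decide whether to drop a line or break a paragraph.
for sample in ["", "42", "XIV", "Chapitre III", "Il faisait nuit noire."]:
    print(repr(sample), "->", is_metadata_line(sample))
# expected: 'empty', 'page_number', 'chapter_number', 'chapter_title', False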
def is_header_block(line):
    """Check if line is header/metadata"""
    line = line.strip()
    # Common header patterns
    header_patterns = [
        r'^BeQ$',
        r'Bibliothèque électronique',
        r'Collection À tous les vents',
        r'^Volume \d+',
        r'^version \d+',
        r'Édition de référence',
        r'^Traduit de',
        r'^Traduction',
        r'^Avertissement$',
        r'^Préface$',
        r'^PRÉFACE$',
        r'^Introduction$',
        r'^INTRODUCTION$',
        r'^Table des matières',
        r'^TABLE DES MATIÈRES',
        r'©|Copyright',
        r'ISBN',
        r'Imprimé en|Printed in',
        r'^Du même auteur',
        r'Librairie|Éditeur|Éditions',
        r'^\d{4}$',  # Years like 1890
        r'^FIN$',
        r'^fin$',
        r'^Fin$',
    ]
    for pattern in header_patterns:
        if re.search(pattern, line, re.IGNORECASE):
            return True
    return False
def is_paragraph_break(line):
    """Determine if this line should cause a paragraph break"""
    stripped = line.strip()
    # Empty line = paragraph break
    if not stripped:
        return True
    # ALL CAPS lines (titles), ignoring very short ones
    if stripped.isupper() and len(stripped) > 3:
        return True
    return False
def clean_book_text(raw_text):
    """Clean extracted text to keep only narrative content as continuous text"""
    lines = raw_text.split('\n')
    # Find where actual content starts (look for longer paragraphs)
    content_start = 0
    consecutive_long_lines = 0
    for i, line in enumerate(lines):
        stripped = line.strip()
        # Look for consecutive lines that look like narrative text
        if len(stripped) > 50 and not is_header_block(stripped):
            consecutive_long_lines += 1
            if consecutive_long_lines >= 3:
                content_start = max(0, i - 3)
                break
        else:
            consecutive_long_lines = 0
    # Process lines and build continuous text
    paragraphs = []
    current_paragraph = []
    for i in range(content_start, len(lines)):
        line = lines[i]
        stripped = line.strip()
        # Check for metadata
        meta_type = is_metadata_line(stripped)
        # Skip page numbers and chapter numbers entirely (don't break paragraph)
        if meta_type in ["page_number", "chapter_number"]:
            continue
        # Chapter/part titles cause a paragraph break but are not included
        if meta_type in ["chapter_title", "part_title"]:
            if current_paragraph:
                paragraphs.append(' '.join(current_paragraph))
                current_paragraph = []
            continue
        # Skip header blocks
        if is_header_block(stripped):
            if current_paragraph:
                paragraphs.append(' '.join(current_paragraph))
                current_paragraph = []
            continue
        # Skip ALL CAPS lines (titles)
        if stripped.isupper() and len(stripped) > 3:
            if current_paragraph:
                paragraphs.append(' '.join(current_paragraph))
                current_paragraph = []
            continue
        # Empty line = paragraph break
        if meta_type == "empty":
            if current_paragraph:
                paragraphs.append(' '.join(current_paragraph))
                current_paragraph = []
            continue
        # Regular text line - add to current paragraph
        # Handle hyphenation at end of line
        if current_paragraph and current_paragraph[-1].endswith('-'):
            # Remove hyphen and join directly
            current_paragraph[-1] = current_paragraph[-1][:-1] + stripped
        else:
            current_paragraph.append(stripped)
    # Don't forget the last paragraph
    if current_paragraph:
        paragraphs.append(' '.join(current_paragraph))
    # Join all paragraphs into one continuous text (single space between paragraphs)
    result = ' '.join(paragraphs)
    # Clean up extra whitespace
    result = re.sub(r' +', ' ', result)
    # Fix common OCR/extraction issues
    result = re.sub(r' ([.,;:!?])', r'\1', result)  # Remove space before punctuation
    result = re.sub(r'([.,;:!?])([A-Za-zÀ-ÿ])', r'\1 \2', result)  # Add space after punctuation if missing
    return result.strip()
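# Tiny illustration of the cleaning on a made-up extract (page number, chapter heading and
# end-of-line hyphenation are typical of the BeQ PDF layout):
sample_raw = "Chapitre I\n12\nLa nuit tom-\nbait sur la ville .\n\nIl pleuvait ."
print(clean_book_text(sample_raw))
# -> "La nuit tombait sur la ville. Il pleuvait."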
def extract_text_from_pdf(pdf_path):
    """Extract text from PDF using PyMuPDF"""
    text = ""
    with fitz.open(pdf_path) as doc:
        for page in doc:
            text += page.get_text()
    return text
def process_pdf_urls(urls, output_dir="books"):
    """Download PDFs, extract text, and save to txt files"""
    os.makedirs(output_dir, exist_ok=True)
    for url in urls:
        temp_pdf = None  # so cleanup in the except block is safe even if naming fails
        try:
            print(f"Processing: {url}")
            book_name = get_book_name_from_url(url)
            temp_pdf = f"temp_{book_name}.pdf"
            # Download PDF
            print(" Downloading...")
            download_pdf(url, temp_pdf)
            # Extract text
            print(" Extracting text...")
            raw_text = extract_text_from_pdf(temp_pdf)
            # Clean the text to keep only narrative content
            print(" Cleaning text...")
            cleaned_text = clean_book_text(raw_text)
            # Save to txt file
            output_path = os.path.join(output_dir, f"{book_name}.txt")
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(cleaned_text)
            print(f" Saved to: {output_path}")
            # Clean up temp file
            os.remove(temp_pdf)
        except Exception as e:
            print(f" Error processing {url}: {e}")
            if temp_pdf and os.path.exists(temp_pdf):
                os.remove(temp_pdf)
print("Functions loaded. Ready to process PDFs!")
import requests
from bs4 import BeautifulSoup
import re
def get_pdf_urls_from_beq(page_url="https://beq.ebooksgratuits.com/vents/"):
    """Scrape all PDF links from the BEQ website"""
    response = requests.get(page_url, timeout=60)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')
    pdf_urls = set()
    for link in soup.find_all('a', href=True):
        href = link['href']
        if href.endswith('.pdf'):
            # Make absolute URL if relative
            if href.startswith('http'):
                pdf_urls.add(href)
            else:
                # Handle relative URLs
                from urllib.parse import urljoin
                pdf_urls.add(urljoin(page_url, href))
    # Filter to only include the main PDF versions (not xpdf duplicates)
    # Prefer the vents/ folder PDFs
    final_urls = []
    for url in sorted(pdf_urls):
        if '/vents/' in url and '/vents-xpdf/' not in url:
            final_urls.append(url)
    return final_urls
# Get all PDF URLs from the website
print("Fetching PDF links from https://beq.ebooksgratuits.com/vents/ ...")
pdf_urls = get_pdf_urls_from_beq()
print(f"Found {len(pdf_urls)} PDF files!\n")
# Show first 20 as preview
print("First 20 PDFs:")
for i, url in enumerate(pdf_urls[:20], 1):
    print(f"{i}. {url.split('/')[-1]}")
print(f"\n... and {len(pdf_urls) - 20} more PDFs")
# Process all PDFs (or a subset for testing)
# Change the slice to process more or fewer PDFs
# Example: pdf_urls[:10] for first 10, pdf_urls for all
urls_to_process = pdf_urls[:700]  # adjust the slice: e.g. pdf_urls[:5] for a quick test, pdf_urls for everything
print(f"Processing {len(urls_to_process)} PDFs...")
process_pdf_urls(urls_to_process, output_dir="books")
!ls books
!less books/About-Germaine.txt
%pip install datasets huggingface_hub
from huggingface_hub import login
# Login to Hugging Face (you'll need your token from https://huggingface.co/settings/tokens)
login()
from datasets import Dataset
import os
def create_and_upload_dataset(books_dir="books", dataset_name="french-books", username=None):
    """
    Create a Hugging Face dataset from txt files and upload it.

    Args:
        books_dir: Directory containing the txt files
        dataset_name: Name for the dataset on Hugging Face
        username: Your Hugging Face username (if None, will use the logged-in user)
    """
    # Read all txt files
    texts = []
    for filename in sorted(os.listdir(books_dir)):
        if filename.endswith('.txt'):
            filepath = os.path.join(books_dir, filename)
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read().strip()
            if content:  # Only add non-empty books
                texts.append(content)
                print(f"Loaded: {filename} ({len(content):,} characters)")
    print(f"\nTotal books loaded: {len(texts)}")
    # Create dataset with only a 'text' column
    dataset = Dataset.from_dict({"text": texts})
    print(f"\nDataset created with {len(dataset)} rows")
    print(dataset)
    # Push to the Hugging Face Hub
    repo_id = f"{username}/{dataset_name}" if username else dataset_name
    print(f"\nUploading to Hugging Face Hub as '{repo_id}'...")
    dataset.push_to_hub(repo_id, private=False)
    print("\n✅ Dataset uploaded successfully!")
    print(f"View it at: https://huggingface.co/datasets/{repo_id}")
    return dataset
# Create and upload the dataset
# Replace the username below with your own Hugging Face username
dataset = create_and_upload_dataset(
    books_dir="books",
    dataset_name="french-classic-books",
    username="Volko76"  # <-- Change this to your HF username
)