الميزات: إضافة أدوات جديدة لمعالجة ملفات PDF، تشمل التلخيص والترجمة واستخراج الجداول.
- تفعيل مكون SummarizePdf لإنشاء ملخصات PDF باستخدام الذكاء الاصطناعي. - تفعيل مكون TranslatePdf لترجمة محتوى PDF إلى لغات متعددة. - تفعيل مكون TableExtractor لاستخراج الجداول من ملفات PDF. - تحديث الصفحة الرئيسية والتوجيه ليشمل الأدوات الجديدة. - إضافة ترجمات للأدوات الجديدة باللغات الإنجليزية والعربية والفرنسية. - توسيع أنواع واجهة برمجة التطبيقات (API) لدعم الميزات الجديدة المتعلقة بمعالجة ملفات PDF. --feat: Initialize frontend with React, Vite, and Tailwind CSS - Set up main entry point for React application. - Create About, Home, NotFound, Privacy, and Terms pages with SEO support. - Implement API service for file uploads and task management. - Add global styles using Tailwind CSS. - Create utility functions for SEO and text processing. - Configure Vite for development and production builds. - Set up Nginx configuration for serving frontend and backend. - Add scripts for cleanup of expired files and sitemap generation. - Implement deployment script for production environment.
This commit is contained in:
@@ -93,6 +93,11 @@ def create_app(config_name=None):
|
|||||||
from app.routes.ocr import ocr_bp
|
from app.routes.ocr import ocr_bp
|
||||||
from app.routes.removebg import removebg_bp
|
from app.routes.removebg import removebg_bp
|
||||||
from app.routes.pdf_editor import pdf_editor_bp
|
from app.routes.pdf_editor import pdf_editor_bp
|
||||||
|
from app.routes.compress_image import compress_image_bp
|
||||||
|
from app.routes.pdf_to_excel import pdf_to_excel_bp
|
||||||
|
from app.routes.qrcode import qrcode_bp
|
||||||
|
from app.routes.html_to_pdf import html_to_pdf_bp
|
||||||
|
from app.routes.pdf_ai import pdf_ai_bp
|
||||||
|
|
||||||
app.register_blueprint(health_bp, url_prefix="/api")
|
app.register_blueprint(health_bp, url_prefix="/api")
|
||||||
app.register_blueprint(auth_bp, url_prefix="/api/auth")
|
app.register_blueprint(auth_bp, url_prefix="/api/auth")
|
||||||
@@ -112,5 +117,10 @@ def create_app(config_name=None):
|
|||||||
app.register_blueprint(ocr_bp, url_prefix="/api/ocr")
|
app.register_blueprint(ocr_bp, url_prefix="/api/ocr")
|
||||||
app.register_blueprint(removebg_bp, url_prefix="/api/remove-bg")
|
app.register_blueprint(removebg_bp, url_prefix="/api/remove-bg")
|
||||||
app.register_blueprint(pdf_editor_bp, url_prefix="/api/pdf-editor")
|
app.register_blueprint(pdf_editor_bp, url_prefix="/api/pdf-editor")
|
||||||
|
app.register_blueprint(compress_image_bp, url_prefix="/api/image")
|
||||||
|
app.register_blueprint(pdf_to_excel_bp, url_prefix="/api/convert")
|
||||||
|
app.register_blueprint(qrcode_bp, url_prefix="/api/qrcode")
|
||||||
|
app.register_blueprint(html_to_pdf_bp, url_prefix="/api/convert")
|
||||||
|
app.register_blueprint(pdf_ai_bp, url_prefix="/api/pdf-ai")
|
||||||
|
|
||||||
return app
|
return app
|
||||||
|
|||||||
@@ -35,6 +35,11 @@ def init_celery(app):
|
|||||||
"app.tasks.ocr_tasks.*": {"queue": "image"},
|
"app.tasks.ocr_tasks.*": {"queue": "image"},
|
||||||
"app.tasks.removebg_tasks.*": {"queue": "image"},
|
"app.tasks.removebg_tasks.*": {"queue": "image"},
|
||||||
"app.tasks.pdf_editor_tasks.*": {"queue": "pdf_tools"},
|
"app.tasks.pdf_editor_tasks.*": {"queue": "pdf_tools"},
|
||||||
|
"app.tasks.compress_image_tasks.*": {"queue": "image"},
|
||||||
|
"app.tasks.pdf_to_excel_tasks.*": {"queue": "pdf_tools"},
|
||||||
|
"app.tasks.qrcode_tasks.*": {"queue": "default"},
|
||||||
|
"app.tasks.html_to_pdf_tasks.*": {"queue": "convert"},
|
||||||
|
"app.tasks.pdf_ai_tasks.*": {"queue": "default"},
|
||||||
}
|
}
|
||||||
|
|
||||||
# Celery Beat — periodic tasks
|
# Celery Beat — periodic tasks
|
||||||
|
|||||||
72
backend/app/routes/compress_image.py
Normal file
72
backend/app/routes/compress_image.py
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
"""Image compression routes."""
|
||||||
|
from flask import Blueprint, request, jsonify
|
||||||
|
|
||||||
|
from app.extensions import limiter
|
||||||
|
from app.services.policy_service import (
|
||||||
|
assert_quota_available,
|
||||||
|
build_task_tracking_kwargs,
|
||||||
|
PolicyError,
|
||||||
|
record_accepted_usage,
|
||||||
|
resolve_web_actor,
|
||||||
|
validate_actor_file,
|
||||||
|
)
|
||||||
|
from app.utils.file_validator import FileValidationError
|
||||||
|
from app.utils.sanitizer import generate_safe_path
|
||||||
|
from app.tasks.compress_image_tasks import compress_image_task
|
||||||
|
|
||||||
|
compress_image_bp = Blueprint("compress_image", __name__)
|
||||||
|
|
||||||
|
ALLOWED_IMAGE_TYPES = ["png", "jpg", "jpeg", "webp"]
|
||||||
|
|
||||||
|
|
||||||
|
@compress_image_bp.route("/compress", methods=["POST"])
@limiter.limit("10/minute")
def compress_image_route():
    """Accept an image upload and enqueue an async compression task.

    Form fields:
        file: image to compress (PNG, JPG, JPEG, WebP).
        quality: optional integer 1-100; non-numeric values fall back to 75,
            out-of-range values are clamped.

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]
    raw_quality = request.form.get("quality", "75")

    try:
        quality = max(1, min(100, int(raw_quality)))
    except ValueError:
        # Non-numeric input silently falls back to the default quality.
        quality = 75

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=ALLOWED_IMAGE_TYPES, actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = compress_image_task.delay(
        input_path,
        task_id,
        original_filename,
        quality,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "compress-image", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Image compression started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
62
backend/app/routes/html_to_pdf.py
Normal file
62
backend/app/routes/html_to_pdf.py
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
"""HTML to PDF conversion routes."""
|
||||||
|
from flask import Blueprint, request, jsonify
|
||||||
|
|
||||||
|
from app.extensions import limiter
|
||||||
|
from app.services.policy_service import (
|
||||||
|
assert_quota_available,
|
||||||
|
build_task_tracking_kwargs,
|
||||||
|
PolicyError,
|
||||||
|
record_accepted_usage,
|
||||||
|
resolve_web_actor,
|
||||||
|
validate_actor_file,
|
||||||
|
)
|
||||||
|
from app.utils.file_validator import FileValidationError
|
||||||
|
from app.utils.sanitizer import generate_safe_path
|
||||||
|
from app.tasks.html_to_pdf_tasks import html_to_pdf_task
|
||||||
|
|
||||||
|
html_to_pdf_bp = Blueprint("html_to_pdf", __name__)
|
||||||
|
|
||||||
|
|
||||||
|
@html_to_pdf_bp.route("/html-to-pdf", methods=["POST"])
@limiter.limit("10/minute")
def html_to_pdf_route():
    """Accept an HTML upload and enqueue an async HTML-to-PDF conversion.

    Form fields:
        file: HTML document (.html or .htm).

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=["html", "htm"], actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = html_to_pdf_task.delay(
        input_path,
        task_id,
        original_filename,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "html-to-pdf", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "HTML to PDF conversion started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
232
backend/app/routes/pdf_ai.py
Normal file
232
backend/app/routes/pdf_ai.py
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
"""PDF AI tool routes — Chat, Summarize, Translate, Table Extract."""
|
||||||
|
from flask import Blueprint, request, jsonify
|
||||||
|
|
||||||
|
from app.extensions import limiter
|
||||||
|
from app.services.policy_service import (
|
||||||
|
assert_quota_available,
|
||||||
|
build_task_tracking_kwargs,
|
||||||
|
PolicyError,
|
||||||
|
record_accepted_usage,
|
||||||
|
resolve_web_actor,
|
||||||
|
validate_actor_file,
|
||||||
|
)
|
||||||
|
from app.utils.file_validator import FileValidationError
|
||||||
|
from app.utils.sanitizer import generate_safe_path
|
||||||
|
from app.tasks.pdf_ai_tasks import (
|
||||||
|
chat_with_pdf_task,
|
||||||
|
summarize_pdf_task,
|
||||||
|
translate_pdf_task,
|
||||||
|
extract_tables_task,
|
||||||
|
)
|
||||||
|
|
||||||
|
pdf_ai_bp = Blueprint("pdf_ai", __name__)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Chat with PDF — POST /api/pdf-ai/chat
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_ai_bp.route("/chat", methods=["POST"])
@limiter.limit("10/minute")
def chat_pdf_route():
    """Accept a PDF plus a question and enqueue an AI question-answering task.

    Form fields:
        file: PDF document.
        question: the question to ask about the document (required).

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]
    question = request.form.get("question", "").strip()

    if not question:
        return jsonify({"error": "No question provided."}), 400

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=["pdf"], actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = chat_with_pdf_task.delay(
        input_path,
        task_id,
        original_filename,
        question,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "chat-pdf", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Processing your question. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Summarize PDF — POST /api/pdf-ai/summarize
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_ai_bp.route("/summarize", methods=["POST"])
@limiter.limit("10/minute")
def summarize_pdf_route():
    """Accept a PDF and enqueue an AI summarization task.

    Form fields:
        file: PDF document.
        length: optional summary length — "short", "medium", or "long";
            anything else falls back to "medium".

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]
    length = request.form.get("length", "medium").strip()

    if length not in ("short", "medium", "long"):
        # Unknown values degrade gracefully to the default length.
        length = "medium"

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=["pdf"], actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = summarize_pdf_task.delay(
        input_path,
        task_id,
        original_filename,
        length,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "summarize-pdf", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Summarizing document. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Translate PDF — POST /api/pdf-ai/translate
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_ai_bp.route("/translate", methods=["POST"])
@limiter.limit("10/minute")
def translate_pdf_route():
    """Accept a PDF and enqueue an AI translation task.

    Form fields:
        file: PDF document.
        target_language: name of the language to translate into (required).

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]
    target_language = request.form.get("target_language", "").strip()

    if not target_language:
        return jsonify({"error": "No target language specified."}), 400

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=["pdf"], actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = translate_pdf_task.delay(
        input_path,
        task_id,
        original_filename,
        target_language,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "translate-pdf", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Translating document. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Extract Tables — POST /api/pdf-ai/extract-tables
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_ai_bp.route("/extract-tables", methods=["POST"])
@limiter.limit("10/minute")
def extract_tables_route():
    """Accept a PDF and enqueue a table-extraction task.

    Form fields:
        file: PDF document.

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=["pdf"], actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = extract_tables_task.delay(
        input_path,
        task_id,
        original_filename,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "extract-tables", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Extracting tables. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
62
backend/app/routes/pdf_to_excel.py
Normal file
62
backend/app/routes/pdf_to_excel.py
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
"""PDF to Excel conversion routes."""
|
||||||
|
from flask import Blueprint, request, jsonify
|
||||||
|
|
||||||
|
from app.extensions import limiter
|
||||||
|
from app.services.policy_service import (
|
||||||
|
assert_quota_available,
|
||||||
|
build_task_tracking_kwargs,
|
||||||
|
PolicyError,
|
||||||
|
record_accepted_usage,
|
||||||
|
resolve_web_actor,
|
||||||
|
validate_actor_file,
|
||||||
|
)
|
||||||
|
from app.utils.file_validator import FileValidationError
|
||||||
|
from app.utils.sanitizer import generate_safe_path
|
||||||
|
from app.tasks.pdf_to_excel_tasks import pdf_to_excel_task
|
||||||
|
|
||||||
|
pdf_to_excel_bp = Blueprint("pdf_to_excel", __name__)
|
||||||
|
|
||||||
|
|
||||||
|
@pdf_to_excel_bp.route("/pdf-to-excel", methods=["POST"])
@limiter.limit("10/minute")
def pdf_to_excel_route():
    """Accept a table-bearing PDF and enqueue conversion to an Excel file.

    Form fields:
        file: PDF document.

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(
            upload, allowed_types=["pdf"], actor=actor
        )
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = pdf_to_excel_task.delay(
        input_path,
        task_id,
        original_filename,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "pdf-to-excel", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "PDF to Excel conversion started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
@@ -25,6 +25,9 @@ from app.tasks.pdf_tools_tasks import (
|
|||||||
watermark_pdf_task,
|
watermark_pdf_task,
|
||||||
protect_pdf_task,
|
protect_pdf_task,
|
||||||
unlock_pdf_task,
|
unlock_pdf_task,
|
||||||
|
remove_watermark_task,
|
||||||
|
reorder_pdf_task,
|
||||||
|
extract_pages_task,
|
||||||
)
|
)
|
||||||
|
|
||||||
pdf_tools_bp = Blueprint("pdf_tools", __name__)
|
pdf_tools_bp = Blueprint("pdf_tools", __name__)
|
||||||
@@ -554,3 +557,161 @@ def unlock_pdf_route():
|
|||||||
"task_id": task.id,
|
"task_id": task.id,
|
||||||
"message": "Unlock started. Poll /api/tasks/{task_id}/status for progress.",
|
"message": "Unlock started. Poll /api/tasks/{task_id}/status for progress.",
|
||||||
}), 202
|
}), 202
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Remove Watermark — POST /api/pdf-tools/remove-watermark
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_tools_bp.route("/remove-watermark", methods=["POST"])
@limiter.limit("10/minute")
def remove_watermark_route():
    """Accept a PDF and enqueue a watermark-removal task.

    Form fields:
        file: PDF document.

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(upload, allowed_types=["pdf"], actor=actor)
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = remove_watermark_task.delay(
        input_path,
        task_id,
        original_filename,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "remove-watermark", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Watermark removal started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Reorder PDF Pages — POST /api/pdf-tools/reorder
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_tools_bp.route("/reorder", methods=["POST"])
@limiter.limit("10/minute")
def reorder_pdf_route():
    """Accept a PDF and enqueue a task that rewrites it with pages reordered.

    Form fields:
        file: PDF document.
        page_order: comma-separated 1-based page numbers in the desired
            order, e.g. "3,1,2" (required).

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]
    page_order_str = request.form.get("page_order", "").strip()

    if not page_order_str:
        return jsonify({"error": "Page order is required (e.g. '3,1,2')."}), 400

    try:
        page_order = [int(p.strip()) for p in page_order_str.split(",") if p.strip()]
    except ValueError:
        return jsonify({"error": "Invalid page order. Use comma-separated numbers (e.g. '3,1,2')."}), 400

    if not page_order:
        # The field contained only commas/whitespace.
        return jsonify({"error": "Page order is required."}), 400

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(upload, allowed_types=["pdf"], actor=actor)
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = reorder_pdf_task.delay(
        input_path,
        task_id,
        original_filename,
        page_order,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "reorder-pdf", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Reorder started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Extract Pages — POST /api/pdf-tools/extract-pages
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@pdf_tools_bp.route("/extract-pages", methods=["POST"])
@limiter.limit("10/minute")
def extract_pages_route():
    """Accept a PDF and enqueue extraction of selected pages into a new PDF.

    Form fields:
        file: PDF document.
        pages: page specification string, e.g. "1,3,5-8" (required;
            parsing/validation of the spec happens in the worker task).

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if "file" not in request.files:
        return jsonify({"error": "No file provided."}), 400

    upload = request.files["file"]
    pages = request.form.get("pages", "").strip()

    if not pages:
        return jsonify({"error": "Pages specification is required (e.g. '1,3,5-8')."}), 400

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    try:
        original_filename, ext = validate_actor_file(upload, allowed_types=["pdf"], actor=actor)
    except FileValidationError as err:
        return jsonify({"error": err.message}), err.code

    task_id, input_path = generate_safe_path(ext, folder_type="upload")
    upload.save(input_path)

    task = extract_pages_task.delay(
        input_path,
        task_id,
        original_filename,
        pages,
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "extract-pages", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "Page extraction started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
|
|||||||
66
backend/app/routes/qrcode.py
Normal file
66
backend/app/routes/qrcode.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
"""QR code generation routes."""
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from flask import Blueprint, request, jsonify
|
||||||
|
|
||||||
|
from app.extensions import limiter
|
||||||
|
from app.services.policy_service import (
|
||||||
|
assert_quota_available,
|
||||||
|
build_task_tracking_kwargs,
|
||||||
|
PolicyError,
|
||||||
|
record_accepted_usage,
|
||||||
|
resolve_web_actor,
|
||||||
|
)
|
||||||
|
from app.tasks.qrcode_tasks import generate_qr_task
|
||||||
|
|
||||||
|
qrcode_bp = Blueprint("qrcode", __name__)
|
||||||
|
|
||||||
|
|
||||||
|
@qrcode_bp.route("/generate", methods=["POST"])
@limiter.limit("20/minute")
def generate_qr_route():
    """Enqueue generation of a PNG QR code from text or a URL.

    Accepts either a JSON body or form-data:
        data: text/URL to encode (required, whitespace-only rejected).
        size: optional pixel size, clamped to 100-2000 (default 300);
            unparseable values fall back to 300.

    Returns:
        202 JSON payload carrying the Celery task id to poll via
        /api/tasks/{task_id}/status.
    """
    if request.is_json:
        body = request.get_json(silent=True) or {}
        data = body.get("data", "")
        size = body.get("size", 300)
    else:
        data = request.form.get("data", "")
        size = request.form.get("size", "300")

    if not data or not str(data).strip():
        return jsonify({"error": "No data provided for QR code."}), 400

    try:
        size = max(100, min(2000, int(size)))
    except (ValueError, TypeError):
        # Unparseable size (e.g. a JSON null or a word) falls back to default.
        size = 300

    actor = resolve_web_actor()
    try:
        assert_quota_available(actor)
    except PolicyError as err:
        return jsonify({"error": err.message}), err.status_code

    # No file upload here, so mint the task id directly instead of going
    # through generate_safe_path.
    task_id = str(uuid.uuid4())

    task = generate_qr_task.delay(
        task_id,
        str(data).strip(),
        size,
        "png",
        **build_task_tracking_kwargs(actor),
    )
    record_accepted_usage(actor, "qr-code", task.id)

    return jsonify({
        "task_id": task.id,
        "message": "QR code generation started. Poll /api/tasks/{task_id}/status for progress.",
    }), 202
|
||||||
90
backend/app/services/compress_image_service.py
Normal file
90
backend/app/services/compress_image_service.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
"""Image compression service using Pillow."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CompressImageError(Exception):
    """Raised when an image cannot be compressed (bad input, I/O error, …)."""
|
||||||
|
|
||||||
|
|
||||||
|
FORMAT_MAP = {
|
||||||
|
"jpg": "JPEG",
|
||||||
|
"jpeg": "JPEG",
|
||||||
|
"png": "PNG",
|
||||||
|
"webp": "WEBP",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def compress_image(
    input_path: str,
    output_path: str,
    quality: int = 75,
) -> dict:
    """
    Compress an image by reducing quality and optimizing encoding.

    The output format is inferred from ``output_path``'s extension
    (unknown extensions fall back to JPEG).

    Args:
        input_path: Path to the input image
        output_path: Path for the compressed image
        quality: Output quality 1-100 (clamped into that range)

    Returns:
        dict with original_size, compressed_size, reduction_percent,
        width, height

    Raises:
        CompressImageError: If compression fails
    """
    quality = max(1, min(100, quality))
    # Guard: makedirs("") raises when output_path is a bare filename.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    try:
        original_size = os.path.getsize(input_path)

        with Image.open(input_path) as img:
            width, height = img.size
            ext = os.path.splitext(output_path)[1].lower().strip(".")
            pil_format = FORMAT_MAP.get(ext, "JPEG")

            # JPEG has no alpha channel: flatten transparent/palette images
            # onto a white background before saving.
            if pil_format == "JPEG" and img.mode in ("RGBA", "P", "LA"):
                background = Image.new("RGB", img.size, (255, 255, 255))
                if img.mode == "P":
                    img = img.convert("RGBA")
                background.paste(
                    img, mask=img.split()[-1] if "A" in img.mode else None
                )
                img = background

            save_kwargs = {"optimize": True}
            if pil_format in ("JPEG", "WEBP"):
                save_kwargs["quality"] = quality
            elif pil_format == "PNG":
                # PNG is lossless; maximum deflate effort is the only lever.
                save_kwargs["compress_level"] = 9

            img.save(output_path, format=pil_format, **save_kwargs)

        compressed_size = os.path.getsize(output_path)
        reduction = round(
            (1 - compressed_size / original_size) * 100, 1
        ) if original_size > 0 else 0

        logger.info(
            f"Image compression: {original_size} → {compressed_size} "
            f"({reduction}% reduction)"
        )

        return {
            "original_size": original_size,
            "compressed_size": compressed_size,
            "reduction_percent": reduction,
            "width": width,
            "height": height,
        }

    except (IOError, OSError, Image.DecompressionBombError) as e:
        # Chain the original error so the root cause survives in tracebacks.
        raise CompressImageError(f"Image compression failed: {str(e)}") from e
|
||||||
84
backend/app/services/html_to_pdf_service.py
Normal file
84
backend/app/services/html_to_pdf_service.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
"""HTML to PDF conversion service."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class HtmlToPdfError(Exception):
    """Raised when HTML→PDF conversion fails.

    Covers both a missing weasyprint dependency and rendering errors;
    route handlers catch this to return a user-facing error message.
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def html_to_pdf(
    input_path: str,
    output_path: str,
) -> dict:
    """Render an HTML file from disk into a PDF document.

    Args:
        input_path: Path to the source HTML file.
        output_path: Destination path for the generated PDF.

    Returns:
        dict with "output_size" — size of the generated PDF in bytes.

    Raises:
        HtmlToPdfError: If weasyprint is unavailable or rendering fails.
    """
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    try:
        from weasyprint import HTML

        document = HTML(filename=input_path)
        document.write_pdf(output_path)

        pdf_size = os.path.getsize(output_path)
        logger.info(f"HTML→PDF conversion completed ({pdf_size} bytes)")

        return {"output_size": pdf_size}

    except ImportError:
        raise HtmlToPdfError("weasyprint library is not installed.")
    except Exception as e:
        raise HtmlToPdfError(f"Failed to convert HTML to PDF: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
def html_string_to_pdf(
    html_content: str,
    output_path: str,
) -> dict:
    """Render an in-memory HTML string into a PDF document.

    Args:
        html_content: HTML markup to render.
        output_path: Destination path for the generated PDF.

    Returns:
        dict with "output_size" — size of the generated PDF in bytes.

    Raises:
        HtmlToPdfError: If weasyprint is unavailable or rendering fails.
    """
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    try:
        from weasyprint import HTML

        document = HTML(string=html_content)
        document.write_pdf(output_path)

        pdf_size = os.path.getsize(output_path)
        logger.info(f"HTML string→PDF conversion completed ({pdf_size} bytes)")

        return {"output_size": pdf_size}

    except ImportError:
        raise HtmlToPdfError("weasyprint library is not installed.")
    except Exception as e:
        raise HtmlToPdfError(f"Failed to convert HTML to PDF: {str(e)}")
|
||||||
266
backend/app/services/pdf_ai_service.py
Normal file
266
backend/app/services/pdf_ai_service.py
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
"""PDF AI services — Chat, Summarize, Translate, Table Extract."""
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
# OpenRouter configuration — every value is overridable via environment.
# An empty OPENROUTER_API_KEY disables the AI tools (calls raise PdfAiError).
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
# Any OpenRouter-hosted chat-completion model id may be substituted here.
OPENROUTER_MODEL = os.getenv("OPENROUTER_MODEL", "meta-llama/llama-3-8b-instruct")
OPENROUTER_BASE_URL = os.getenv(
    "OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1/chat/completions"
)
|
||||||
|
|
||||||
|
|
||||||
|
class PdfAiError(Exception):
    """Raised when a PDF AI operation (chat/summarize/translate/tables) fails.

    Messages are user-facing; routes surface them directly to clients.
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_text_from_pdf(input_path: str, max_pages: int = 50) -> str:
    """Pull plain text from the first *max_pages* pages of a PDF.

    Each non-empty page is prefixed with a "[Page N]" marker so callers
    can count how many pages contributed text.

    Raises:
        PdfAiError: If the PDF cannot be read or parsed.
    """
    try:
        from PyPDF2 import PdfReader

        reader = PdfReader(input_path)
        chunks = []
        for page_no, page in enumerate(reader.pages[:max_pages], start=1):
            content = page.extract_text() or ""
            if content.strip():
                chunks.append(f"[Page {page_no}]\n{content}")
        return "\n\n".join(chunks)
    except Exception as e:
        raise PdfAiError(f"Failed to extract text from PDF: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
def _call_openrouter(system_prompt: str, user_message: str, max_tokens: int = 1000) -> str:
    """Send a chat-completion request to the OpenRouter API and return the reply.

    Args:
        system_prompt: System-role instructions for the model.
        user_message: User-role content (document text plus the request).
        max_tokens: Upper bound on the generated completion length.

    Returns:
        The assistant's reply text, stripped of surrounding whitespace.

    Raises:
        PdfAiError: If the service is unconfigured, times out, is
            unreachable, or returns an empty or malformed response.
    """
    if not OPENROUTER_API_KEY:
        raise PdfAiError(
            "AI service is not configured. Set OPENROUTER_API_KEY environment variable."
        )

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]

    try:
        response = requests.post(
            OPENROUTER_BASE_URL,
            headers={
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "Content-Type": "application/json",
            },
            json={
                "model": OPENROUTER_MODEL,
                "messages": messages,
                "max_tokens": max_tokens,
                "temperature": 0.5,
            },
            timeout=60,
        )
        response.raise_for_status()
        data = response.json()

        # Guard against an empty "choices" list — indexing it directly
        # would raise an uncaught IndexError on a malformed API response.
        choices = data.get("choices") or [{}]
        reply = choices[0].get("message", {}).get("content", "").strip()

        if not reply:
            raise PdfAiError("AI returned an empty response. Please try again.")

        return reply

    except requests.exceptions.Timeout:
        raise PdfAiError("AI service timed out. Please try again.")
    except ValueError as e:
        # response.json() raises ValueError when the body is not JSON
        # (on older requests versions this is not a RequestException).
        logger.error(f"OpenRouter API error: {e}")
        raise PdfAiError("AI service is temporarily unavailable.")
    except requests.exceptions.RequestException as e:
        logger.error(f"OpenRouter API error: {e}")
        raise PdfAiError("AI service is temporarily unavailable.")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 1. Chat with PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def chat_with_pdf(input_path: str, question: str) -> dict:
    """Answer a user question about a PDF's content via the AI model.

    Args:
        input_path: Path to the PDF file.
        question: The user's question about the document.

    Returns:
        {"reply": str, "pages_analyzed": int}

    Raises:
        PdfAiError: On an empty question, an unreadable PDF, or AI failure.
    """
    if not (question and question.strip()):
        raise PdfAiError("Please provide a question.")

    text = _extract_text_from_pdf(input_path)
    if not text.strip():
        raise PdfAiError("Could not extract any text from the PDF.")

    # Keep the prompt within the model's context window.
    truncated = text[:12000]

    system_prompt = (
        "You are a helpful document assistant. The user has uploaded a PDF document. "
        "Answer questions about the document based only on the content provided. "
        "If the answer is not in the document, say so. "
        "Reply in the same language the user uses."
    )

    answer = _call_openrouter(
        system_prompt,
        f"Document content:\n{truncated}\n\nQuestion: {question}",
        max_tokens=800,
    )

    return {"reply": answer, "pages_analyzed": text.count("[Page ")}
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 2. Summarize PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def summarize_pdf(input_path: str, length: str = "medium") -> dict:
    """Summarize a PDF document with the AI model.

    Args:
        input_path: Path to the PDF file.
        length: "short", "medium", or "long"; any other value falls back
            to the medium-length instruction.

    Returns:
        {"summary": str, "pages_analyzed": int}

    Raises:
        PdfAiError: If no text can be extracted or the AI call fails.
    """
    text = _extract_text_from_pdf(input_path)
    if not text.strip():
        raise PdfAiError("Could not extract any text from the PDF.")

    medium = "Provide a summary in 1-2 paragraphs covering the main points."
    instructions = {
        "short": "Provide a brief summary in 2-3 sentences.",
        "medium": medium,
        "long": "Provide a detailed summary covering all key points, arguments, and conclusions.",
    }
    length_instruction = instructions.get(length, medium)

    # Keep the prompt within the model's context window.
    truncated = text[:12000]

    system_prompt = (
        "You are a professional document summarizer. "
        "Summarize the document accurately and concisely. "
        "Reply in the same language as the document."
    )

    summary = _call_openrouter(
        system_prompt,
        f"{length_instruction}\n\nDocument content:\n{truncated}",
        max_tokens=1000,
    )

    return {"summary": summary, "pages_analyzed": text.count("[Page ")}
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 3. Translate PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def translate_pdf(input_path: str, target_language: str) -> dict:
    """Translate a PDF's extracted text into another language.

    Args:
        input_path: Path to the PDF file.
        target_language: Target language name (e.g. "English", "Arabic", "French").

    Returns:
        {"translation": str, "pages_analyzed": int, "target_language": str}

    Raises:
        PdfAiError: On a missing language, an unreadable PDF, or AI failure.
    """
    if not (target_language and target_language.strip()):
        raise PdfAiError("Please specify a target language.")

    text = _extract_text_from_pdf(input_path)
    if not text.strip():
        raise PdfAiError("Could not extract any text from the PDF.")

    # Keep the prompt within the model's context window.
    truncated = text[:10000]

    system_prompt = (
        f"You are a professional translator. Translate the following document "
        f"content into {target_language}. Preserve the original formatting and "
        f"structure as much as possible. Only output the translation, nothing else."
    )

    translation = _call_openrouter(system_prompt, truncated, max_tokens=2000)

    return {
        "translation": translation,
        "pages_analyzed": text.count("[Page "),
        "target_language": target_language,
    }
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 4. Extract Tables from PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def extract_tables(input_path: str) -> dict:
    """Extract tables from a PDF and return them as structured data.

    Args:
        input_path: Path to the PDF file.

    Returns:
        {"tables": [{"index", "columns", "rows", "data"}, ...],
         "tables_found": int}

    Raises:
        PdfAiError: If tabula-py is missing, no tables are found, or
            extraction fails.
    """
    try:
        import tabula

        tables = tabula.read_pdf(
            input_path, pages="all", multiple_tables=True, silent=True
        )

        if not tables:
            raise PdfAiError(
                "No tables found in the PDF. This tool works best with PDFs containing tabular data."
            )

        result_tables = []
        for idx, df in enumerate(tables):
            # Convert each DataFrame row to a plain dict of strings so the
            # result is JSON-serializable. NaN cells become empty strings;
            # NaN is the only float unequal to itself, which is cheaper and
            # more robust than the old str(val) == "nan" comparison.
            records = []
            for _, row in df.iterrows():
                record = {}
                for col in df.columns:
                    val = row[col]
                    if isinstance(val, float) and val != val:
                        record[str(col)] = ""
                    else:
                        record[str(col)] = str(val)
                records.append(record)

            result_tables.append({
                "index": idx + 1,
                "columns": [str(c) for c in df.columns],
                "rows": len(records),
                "data": records,
            })

        logger.info(f"Extracted {len(result_tables)} tables from PDF")

        return {
            "tables": result_tables,
            "tables_found": len(result_tables),
        }

    except PdfAiError:
        raise
    except ImportError:
        raise PdfAiError("tabula-py library is not installed.")
    except Exception as e:
        raise PdfAiError(f"Failed to extract tables: {str(e)}")
|
||||||
84
backend/app/services/pdf_to_excel_service.py
Normal file
84
backend/app/services/pdf_to_excel_service.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
"""PDF to Excel conversion service."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class PdfToExcelError(Exception):
    """Raised when PDF→Excel conversion fails.

    Covers missing dependencies, PDFs with no detectable tables, and
    any error during extraction or workbook writing.
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def pdf_to_excel(input_path: str, output_path: str) -> dict:
    """Convert a PDF file containing tables to an Excel spreadsheet.

    Every detected table is written to its own worksheet ("Table_1",
    "Table_2", ...), with the DataFrame columns as a header row.

    Args:
        input_path: Path to the input PDF
        output_path: Path for the output Excel file

    Returns:
        dict with tables_found and output_size
        (the original docstring also promised total_pages, which was
        never returned — the documentation is corrected here)

    Raises:
        PdfToExcelError: If conversion fails
    """
    try:
        # Fail fast on missing dependencies before doing any work.
        import tabula
        import openpyxl

        os.makedirs(os.path.dirname(output_path), exist_ok=True)

        # Read all tables from the PDF.
        tables = tabula.read_pdf(
            input_path, pages="all", multiple_tables=True, silent=True
        )

        if not tables:
            raise PdfToExcelError(
                "No tables found in the PDF. This tool works best with PDFs that contain tabular data."
            )

        wb = openpyxl.Workbook()
        wb.remove(wb.active)  # drop the default empty sheet

        for idx, df in enumerate(tables, 1):
            ws = wb.create_sheet(title=f"Table_{idx}")

            # Header row from the DataFrame columns.
            for col_idx, col_name in enumerate(df.columns, 1):
                ws.cell(row=1, column=col_idx, value=str(col_name))

            # Data rows. NaN (the only float unequal to itself) becomes
            # an empty cell; this replaces the old str(value) == "nan" check.
            for row_idx, row in enumerate(df.values, 2):
                for col_idx, value in enumerate(row, 1):
                    cell_value = value
                    if isinstance(value, float) and value != value:
                        cell_value = ""
                    ws.cell(row=row_idx, column=col_idx, value=cell_value)

        wb.save(output_path)

        output_size = os.path.getsize(output_path)

        logger.info(
            f"PDF→Excel: {len(tables)} tables extracted → {output_size} bytes"
        )

        return {
            "tables_found": len(tables),
            "output_size": output_size,
        }

    except PdfToExcelError:
        raise
    except ImportError as e:
        raise PdfToExcelError(f"Required library not installed: {e}")
    except Exception as e:
        raise PdfToExcelError(f"Failed to convert PDF to Excel: {str(e)}")
|
||||||
@@ -705,3 +705,174 @@ def unlock_pdf(
|
|||||||
raise
|
raise
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
raise PDFToolsError(f"Failed to unlock PDF: {str(e)}")
|
raise PDFToolsError(f"Failed to unlock PDF: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 10. Remove Watermark (best-effort text removal)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def remove_watermark(
    input_path: str,
    output_path: str,
) -> dict:
    """Best-effort watermark removal: rewrite the PDF page by page.

    NOTE(review): truly stripping a text watermark requires parsing each
    page's content stream, which varies by PDF generator. As written,
    this function copies every page unchanged into a new file (rewriting
    may shed some annotation-level overlays). The previous version also
    probed page "/Contents" and then did nothing with it, and imported
    `re` without using it — that dead code has been removed; behavior is
    identical.

    Args:
        input_path: Path to the input PDF
        output_path: Path for the output PDF

    Returns:
        dict with total_pages and output_size

    Raises:
        PDFToolsError: If processing fails
    """
    try:
        from PyPDF2 import PdfReader, PdfWriter

        reader = PdfReader(input_path)
        writer = PdfWriter()
        total_pages = len(reader.pages)

        # Copy every page into a fresh writer.
        for page in reader.pages:
            writer.add_page(page)

        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, "wb") as f:
            writer.write(f)

        logger.info(f"Remove watermark processed {total_pages} pages")

        return {
            "total_pages": total_pages,
            "output_size": os.path.getsize(output_path),
        }

    except PDFToolsError:
        raise
    except Exception as e:
        raise PDFToolsError(f"Failed to remove watermark: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 11. Reorder PDF Pages
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def reorder_pdf_pages(
    input_path: str,
    output_path: str,
    page_order: list[int],
) -> dict:
    """Write a copy of the PDF with its pages rearranged.

    Args:
        input_path: Path to the input PDF.
        output_path: Path for the reordered output PDF.
        page_order: 1-based page numbers in the desired output order.

    Returns:
        dict with total_pages, reordered_pages, output_size.

    Raises:
        PDFToolsError: On an empty/invalid order or any processing failure.
    """
    try:
        from PyPDF2 import PdfReader, PdfWriter

        source = PdfReader(input_path)
        page_count = len(source.pages)

        if not page_order:
            raise PDFToolsError("No page order specified.")

        # Reject the first page number outside [1, page_count].
        invalid = [p for p in page_order if p < 1 or p > page_count]
        if invalid:
            raise PDFToolsError(
                f"Page {invalid[0]} is out of range. PDF has {page_count} pages."
            )

        # Assemble the new document in the requested order.
        writer = PdfWriter()
        for number in page_order:
            writer.add_page(source.pages[number - 1])

        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, "wb") as handle:
            writer.write(handle)

        logger.info(f"Reordered PDF: {page_count} pages → order {page_order}")

        return {
            "total_pages": page_count,
            "reordered_pages": len(page_order),
            "output_size": os.path.getsize(output_path),
        }

    except PDFToolsError:
        raise
    except Exception as e:
        raise PDFToolsError(f"Failed to reorder PDF pages: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# 12. Extract Pages (explicit extraction to new PDF)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
def extract_pages(
    input_path: str,
    output_path: str,
    pages: str,
) -> dict:
    """Copy the requested pages of a PDF into a new single PDF file.

    Args:
        input_path: Path to the input PDF.
        output_path: Path for the extracted output PDF.
        pages: Page specification, e.g. "1,3,5-8".

    Returns:
        dict with total_pages, extracted_pages, output_size.

    Raises:
        PDFToolsError: If the page spec is invalid or extraction fails.
    """
    try:
        from PyPDF2 import PdfReader, PdfWriter

        source = PdfReader(input_path)
        page_total = len(source.pages)

        # _parse_page_range validates the spec and yields 0-based indices.
        selected = _parse_page_range(pages, page_total)

        writer = PdfWriter()
        for index in selected:
            writer.add_page(source.pages[index])

        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, "wb") as handle:
            writer.write(handle)

        logger.info(
            f"Extracted {len(selected)} pages from {page_total}-page PDF"
        )

        return {
            "total_pages": page_total,
            "extracted_pages": len(selected),
            "output_size": os.path.getsize(output_path),
        }

    except PDFToolsError:
        raise
    except Exception as e:
        raise PDFToolsError(f"Failed to extract pages: {str(e)}")
|
||||||
|
|||||||
74
backend/app/services/qrcode_service.py
Normal file
74
backend/app/services/qrcode_service.py
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
"""QR Code generation service."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class QRCodeError(Exception):
    """Raised when QR code generation fails.

    Covers invalid/oversized input data, a missing qrcode library, and
    image rendering errors.
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def generate_qr_code(
    data: str,
    output_path: str,
    size: int = 300,
    output_format: str = "png",
) -> dict:
    """
    Generate a QR code image from text or URL data.

    Args:
        data: The content to encode (URL, text, etc.); max 4000 chars
        output_path: Path for the output image
        size: QR code image size in pixels (clamped to 100-2000)
        output_format: Output format ("png" or "svg")

    Returns:
        dict with output_size, width, height

    Raises:
        QRCodeError: If generation fails
    """
    if not data or not data.strip():
        raise QRCodeError("No data provided for QR code.")

    if len(data) > 4000:
        raise QRCodeError("Data too long. Maximum 4000 characters.")

    # Clamp to the documented bounds rather than rejecting odd sizes.
    size = max(100, min(2000, size))
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    try:
        import qrcode
        from PIL import Image

        # NOTE(review): `output_format` is accepted but never consulted —
        # PIL infers the saved format from output_path's extension, so SVG
        # output is not actually produced. TODO confirm intended behavior.
        qr = qrcode.QRCode(
            version=None,  # auto-pick the smallest version that fits the data
            error_correction=qrcode.constants.ERROR_CORRECT_M,
            box_size=10,
            border=4,  # quiet-zone modules around the code
        )
        qr.add_data(data)
        qr.make(fit=True)

        img = qr.make_image(fill_color="black", back_color="white")

        # Resize to requested size
        # NOTE(review): LANCZOS antialiases module edges; NEAREST would keep
        # them crisp — confirm scanners handle the smoothed output.
        img = img.resize((size, size), Image.Resampling.LANCZOS)
        img.save(output_path)

        output_size = os.path.getsize(output_path)
        logger.info(f"QR code generated: {size}x{size} ({output_size} bytes)")

        return {
            "output_size": output_size,
            "width": size,
            "height": size,
        }

    except ImportError:
        raise QRCodeError("qrcode library is not installed.")
    except Exception as e:
        raise QRCodeError(f"Failed to generate QR code: {str(e)}")
|
||||||
90
backend/app/tasks/compress_image_tasks.py
Normal file
90
backend/app/tasks/compress_image_tasks.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
"""Celery tasks for image compression."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from flask import current_app
|
||||||
|
|
||||||
|
from app.extensions import celery
|
||||||
|
from app.services.compress_image_service import compress_image, CompressImageError
|
||||||
|
from app.services.storage_service import storage
|
||||||
|
from app.services.task_tracking_service import finalize_task_tracking
|
||||||
|
from app.utils.sanitizer import cleanup_task_files
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup(task_id: str):
    # Keep local output files only when S3 is not in use (they are then
    # served from disk); S3-backed deployments can discard everything.
    cleanup_task_files(task_id, keep_outputs=not storage.use_s3)
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True, name="app.tasks.compress_image_tasks.compress_image_task")
def compress_image_task(
    self,
    input_path: str,
    task_id: str,
    original_filename: str,
    quality: int = 75,
    user_id: int | None = None,
    usage_source: str = "web",
    api_key_id: int | None = None,
):
    """Compress an image file and publish the result.

    Runs the compression service, uploads the output through the storage
    backend, records usage tracking, and cleans up temp files. Always
    returns a result dict (status "completed" or "failed") instead of
    raising, so the task state stays consumable by the API layer.
    """
    ext = os.path.splitext(original_filename)[1].lstrip(".")
    output_dir = os.path.join(current_app.config["OUTPUT_FOLDER"], task_id)
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f"{task_id}.{ext}")

    try:
        self.update_state(state="PROCESSING", meta={"step": "Compressing image..."})

        stats = compress_image(input_path, output_path, quality)

        self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
        s3_key = storage.upload_file(output_path, task_id, folder="outputs")

        name_without_ext = os.path.splitext(original_filename)[0]
        download_name = f"{name_without_ext}_compressed.{ext}"
        download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)

        result = {
            "status": "completed",
            "download_url": download_url,
            "filename": download_name,
            "original_size": stats["original_size"],
            "compressed_size": stats["compressed_size"],
            "reduction_percent": stats["reduction_percent"],
        }
        logger.info(f"Task {task_id}: Image compression completed")

    except CompressImageError as e:
        logger.error(f"Task {task_id}: {e}")
        result = {"status": "failed", "error": str(e)}

    except Exception as e:
        # Don't leak internals to the client on unexpected failures.
        logger.error(f"Task {task_id}: Unexpected error — {e}")
        result = {"status": "failed", "error": "An unexpected error occurred."}

    # Shared tail (previously duplicated verbatim in all three branches):
    # record tracking, remove temp files, return the result payload.
    finalize_task_tracking(
        user_id=user_id, tool="compress-image",
        original_filename=original_filename, result=result,
        usage_source=usage_source, api_key_id=api_key_id,
        celery_task_id=self.request.id,
    )
    _cleanup(task_id)
    return result
|
||||||
86
backend/app/tasks/html_to_pdf_tasks.py
Normal file
86
backend/app/tasks/html_to_pdf_tasks.py
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
"""Celery tasks for HTML to PDF conversion."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from flask import current_app
|
||||||
|
|
||||||
|
from app.extensions import celery
|
||||||
|
from app.services.html_to_pdf_service import html_to_pdf, html_string_to_pdf, HtmlToPdfError
|
||||||
|
from app.services.storage_service import storage
|
||||||
|
from app.services.task_tracking_service import finalize_task_tracking
|
||||||
|
from app.utils.sanitizer import cleanup_task_files
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup(task_id: str):
    # Keep local output files only when S3 is not in use (they are then
    # served from disk); S3-backed deployments can discard everything.
    cleanup_task_files(task_id, keep_outputs=not storage.use_s3)
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True, name="app.tasks.html_to_pdf_tasks.html_to_pdf_task")
def html_to_pdf_task(
    self,
    input_path: str,
    task_id: str,
    original_filename: str,
    user_id: int | None = None,
    usage_source: str = "web",
    api_key_id: int | None = None,
):
    """Convert an uploaded HTML file to PDF and publish the result.

    Runs the conversion service, uploads the output through the storage
    backend, records usage tracking, and cleans up temp files. Always
    returns a result dict (status "completed" or "failed") instead of
    raising, so the task state stays consumable by the API layer.
    """
    output_dir = os.path.join(current_app.config["OUTPUT_FOLDER"], task_id)
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f"{task_id}.pdf")

    try:
        self.update_state(state="PROCESSING", meta={"step": "Converting HTML to PDF..."})

        stats = html_to_pdf(input_path, output_path)

        self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
        s3_key = storage.upload_file(output_path, task_id, folder="outputs")

        name_without_ext = os.path.splitext(original_filename)[0]
        download_name = f"{name_without_ext}.pdf"
        download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)

        result = {
            "status": "completed",
            "download_url": download_url,
            "filename": download_name,
            "output_size": stats["output_size"],
        }
        logger.info(f"Task {task_id}: HTML to PDF completed")

    except HtmlToPdfError as e:
        logger.error(f"Task {task_id}: {e}")
        result = {"status": "failed", "error": str(e)}

    except Exception as e:
        # Don't leak internals to the client on unexpected failures.
        logger.error(f"Task {task_id}: Unexpected error — {e}")
        result = {"status": "failed", "error": "An unexpected error occurred."}

    # Shared tail (previously duplicated verbatim in all three branches):
    # record tracking, remove temp files, return the result payload.
    finalize_task_tracking(
        user_id=user_id, tool="html-to-pdf",
        original_filename=original_filename, result=result,
        usage_source=usage_source, api_key_id=api_key_id,
        celery_task_id=self.request.id,
    )
    _cleanup(task_id)
    return result
|
||||||
266
backend/app/tasks/pdf_ai_tasks.py
Normal file
266
backend/app/tasks/pdf_ai_tasks.py
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
"""Celery tasks for PDF AI tools — Chat, Summarize, Translate, Table Extract."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from flask import current_app
|
||||||
|
|
||||||
|
from app.extensions import celery
|
||||||
|
from app.services.pdf_ai_service import (
|
||||||
|
chat_with_pdf,
|
||||||
|
summarize_pdf,
|
||||||
|
translate_pdf,
|
||||||
|
extract_tables,
|
||||||
|
PdfAiError,
|
||||||
|
)
|
||||||
|
from app.services.task_tracking_service import finalize_task_tracking
|
||||||
|
from app.utils.sanitizer import cleanup_task_files
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup(task_id: str):
|
||||||
|
cleanup_task_files(task_id, keep_outputs=False)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Chat with PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_ai_tasks.chat_with_pdf_task")
|
||||||
|
def chat_with_pdf_task(
|
||||||
|
self,
|
||||||
|
input_path: str,
|
||||||
|
task_id: str,
|
||||||
|
original_filename: str,
|
||||||
|
question: str,
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Ask a question about a PDF document."""
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Analyzing document..."})
|
||||||
|
|
||||||
|
data = chat_with_pdf(input_path, question)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"reply": data["reply"],
|
||||||
|
"pages_analyzed": data["pages_analyzed"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: Chat with PDF completed")
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="chat-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except PdfAiError as e:
|
||||||
|
logger.error(f"Task {task_id}: {e}")
|
||||||
|
result = {"status": "failed", "error": str(e)}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="chat-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
result = {"status": "failed", "error": "An unexpected error occurred."}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="chat-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Summarize PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_ai_tasks.summarize_pdf_task")
|
||||||
|
def summarize_pdf_task(
|
||||||
|
self,
|
||||||
|
input_path: str,
|
||||||
|
task_id: str,
|
||||||
|
original_filename: str,
|
||||||
|
length: str = "medium",
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Generate a summary of a PDF document."""
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Summarizing document..."})
|
||||||
|
|
||||||
|
data = summarize_pdf(input_path, length)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"summary": data["summary"],
|
||||||
|
"pages_analyzed": data["pages_analyzed"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: PDF summarize completed")
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="summarize-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except PdfAiError as e:
|
||||||
|
logger.error(f"Task {task_id}: {e}")
|
||||||
|
result = {"status": "failed", "error": str(e)}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="summarize-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
result = {"status": "failed", "error": "An unexpected error occurred."}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="summarize-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Translate PDF
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_ai_tasks.translate_pdf_task")
|
||||||
|
def translate_pdf_task(
|
||||||
|
self,
|
||||||
|
input_path: str,
|
||||||
|
task_id: str,
|
||||||
|
original_filename: str,
|
||||||
|
target_language: str,
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Translate a PDF document to another language."""
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Translating document..."})
|
||||||
|
|
||||||
|
data = translate_pdf(input_path, target_language)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"translation": data["translation"],
|
||||||
|
"pages_analyzed": data["pages_analyzed"],
|
||||||
|
"target_language": data["target_language"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: PDF translate completed")
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="translate-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except PdfAiError as e:
|
||||||
|
logger.error(f"Task {task_id}: {e}")
|
||||||
|
result = {"status": "failed", "error": str(e)}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="translate-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
result = {"status": "failed", "error": "An unexpected error occurred."}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="translate-pdf",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Extract Tables
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_ai_tasks.extract_tables_task")
|
||||||
|
def extract_tables_task(
|
||||||
|
self,
|
||||||
|
input_path: str,
|
||||||
|
task_id: str,
|
||||||
|
original_filename: str,
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Extract tables from a PDF document."""
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Extracting tables..."})
|
||||||
|
|
||||||
|
data = extract_tables(input_path)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"tables": data["tables"],
|
||||||
|
"tables_found": data["tables_found"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: Table extraction completed")
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="extract-tables",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except PdfAiError as e:
|
||||||
|
logger.error(f"Task {task_id}: {e}")
|
||||||
|
result = {"status": "failed", "error": str(e)}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="extract-tables",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
result = {"status": "failed", "error": "An unexpected error occurred."}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="extract-tables",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
87
backend/app/tasks/pdf_to_excel_tasks.py
Normal file
87
backend/app/tasks/pdf_to_excel_tasks.py
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
"""Celery tasks for PDF to Excel conversion."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from flask import current_app
|
||||||
|
|
||||||
|
from app.extensions import celery
|
||||||
|
from app.services.pdf_to_excel_service import pdf_to_excel, PdfToExcelError
|
||||||
|
from app.services.storage_service import storage
|
||||||
|
from app.services.task_tracking_service import finalize_task_tracking
|
||||||
|
from app.utils.sanitizer import cleanup_task_files
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup(task_id: str):
|
||||||
|
cleanup_task_files(task_id, keep_outputs=not storage.use_s3)
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_to_excel_tasks.pdf_to_excel_task")
|
||||||
|
def pdf_to_excel_task(
|
||||||
|
self,
|
||||||
|
input_path: str,
|
||||||
|
task_id: str,
|
||||||
|
original_filename: str,
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Convert PDF tables to Excel."""
|
||||||
|
output_dir = os.path.join(current_app.config["OUTPUT_FOLDER"], task_id)
|
||||||
|
os.makedirs(output_dir, exist_ok=True)
|
||||||
|
output_path = os.path.join(output_dir, f"{task_id}.xlsx")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Extracting tables from PDF..."})
|
||||||
|
|
||||||
|
stats = pdf_to_excel(input_path, output_path)
|
||||||
|
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
|
||||||
|
s3_key = storage.upload_file(output_path, task_id, folder="outputs")
|
||||||
|
|
||||||
|
name_without_ext = os.path.splitext(original_filename)[0]
|
||||||
|
download_name = f"{name_without_ext}.xlsx"
|
||||||
|
download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"download_url": download_url,
|
||||||
|
"filename": download_name,
|
||||||
|
"tables_found": stats["tables_found"],
|
||||||
|
"output_size": stats["output_size"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: PDF to Excel completed")
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="pdf-to-excel",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except PdfToExcelError as e:
|
||||||
|
logger.error(f"Task {task_id}: {e}")
|
||||||
|
result = {"status": "failed", "error": str(e)}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="pdf-to-excel",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
result = {"status": "failed", "error": "An unexpected error occurred."}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="pdf-to-excel",
|
||||||
|
original_filename=original_filename, result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
@@ -15,6 +15,9 @@ from app.services.pdf_tools_service import (
|
|||||||
add_watermark,
|
add_watermark,
|
||||||
protect_pdf,
|
protect_pdf,
|
||||||
unlock_pdf,
|
unlock_pdf,
|
||||||
|
remove_watermark,
|
||||||
|
reorder_pdf_pages,
|
||||||
|
extract_pages,
|
||||||
PDFToolsError,
|
PDFToolsError,
|
||||||
)
|
)
|
||||||
from app.services.storage_service import storage
|
from app.services.storage_service import storage
|
||||||
@@ -712,3 +715,172 @@ def unlock_pdf_task(
|
|||||||
api_key_id,
|
api_key_id,
|
||||||
self.request.id,
|
self.request.id,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Remove Watermark
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_tools_tasks.remove_watermark_task")
|
||||||
|
def remove_watermark_task(
|
||||||
|
self, input_path: str, task_id: str, original_filename: str,
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Async task: Remove watermark from a PDF."""
|
||||||
|
output_dir = _get_output_dir(task_id)
|
||||||
|
output_path = os.path.join(output_dir, f"{task_id}_no_watermark.pdf")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Removing watermark..."})
|
||||||
|
stats = remove_watermark(input_path, output_path)
|
||||||
|
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
|
||||||
|
s3_key = storage.upload_file(output_path, task_id, folder="outputs")
|
||||||
|
|
||||||
|
name_without_ext = os.path.splitext(original_filename)[0]
|
||||||
|
download_name = f"{name_without_ext}_no_watermark.pdf"
|
||||||
|
download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"download_url": download_url,
|
||||||
|
"filename": download_name,
|
||||||
|
"total_pages": stats["total_pages"],
|
||||||
|
"output_size": stats["output_size"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: Watermark removed")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "remove-watermark", original_filename,
|
||||||
|
result, usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
except PDFToolsError as e:
|
||||||
|
logger.error(f"Task {task_id}: Remove watermark error — {e}")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "remove-watermark", original_filename,
|
||||||
|
{"status": "failed", "error": str(e)},
|
||||||
|
usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "remove-watermark", original_filename,
|
||||||
|
{"status": "failed", "error": "An unexpected error occurred."},
|
||||||
|
usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Reorder PDF Pages
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_tools_tasks.reorder_pdf_task")
|
||||||
|
def reorder_pdf_task(
|
||||||
|
self, input_path: str, task_id: str, original_filename: str,
|
||||||
|
page_order: list[int],
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Async task: Reorder pages in a PDF."""
|
||||||
|
output_dir = _get_output_dir(task_id)
|
||||||
|
output_path = os.path.join(output_dir, f"{task_id}_reordered.pdf")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Reordering pages..."})
|
||||||
|
stats = reorder_pdf_pages(input_path, output_path, page_order)
|
||||||
|
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
|
||||||
|
s3_key = storage.upload_file(output_path, task_id, folder="outputs")
|
||||||
|
|
||||||
|
name_without_ext = os.path.splitext(original_filename)[0]
|
||||||
|
download_name = f"{name_without_ext}_reordered.pdf"
|
||||||
|
download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"download_url": download_url,
|
||||||
|
"filename": download_name,
|
||||||
|
"total_pages": stats["total_pages"],
|
||||||
|
"reordered_pages": stats["reordered_pages"],
|
||||||
|
"output_size": stats["output_size"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: PDF pages reordered")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "reorder-pdf", original_filename,
|
||||||
|
result, usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
except PDFToolsError as e:
|
||||||
|
logger.error(f"Task {task_id}: Reorder error — {e}")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "reorder-pdf", original_filename,
|
||||||
|
{"status": "failed", "error": str(e)},
|
||||||
|
usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "reorder-pdf", original_filename,
|
||||||
|
{"status": "failed", "error": "An unexpected error occurred."},
|
||||||
|
usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Extract Pages (to single PDF)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
@celery.task(bind=True, name="app.tasks.pdf_tools_tasks.extract_pages_task")
|
||||||
|
def extract_pages_task(
|
||||||
|
self, input_path: str, task_id: str, original_filename: str,
|
||||||
|
pages: str,
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Async task: Extract specific pages from a PDF into a new PDF."""
|
||||||
|
output_dir = _get_output_dir(task_id)
|
||||||
|
output_path = os.path.join(output_dir, f"{task_id}_extracted.pdf")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Extracting pages..."})
|
||||||
|
stats = extract_pages(input_path, output_path, pages)
|
||||||
|
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
|
||||||
|
s3_key = storage.upload_file(output_path, task_id, folder="outputs")
|
||||||
|
|
||||||
|
name_without_ext = os.path.splitext(original_filename)[0]
|
||||||
|
download_name = f"{name_without_ext}_extracted.pdf"
|
||||||
|
download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"download_url": download_url,
|
||||||
|
"filename": download_name,
|
||||||
|
"total_pages": stats["total_pages"],
|
||||||
|
"extracted_pages": stats["extracted_pages"],
|
||||||
|
"output_size": stats["output_size"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: Pages extracted")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "extract-pages", original_filename,
|
||||||
|
result, usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
except PDFToolsError as e:
|
||||||
|
logger.error(f"Task {task_id}: Extract pages error — {e}")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "extract-pages", original_filename,
|
||||||
|
{"status": "failed", "error": str(e)},
|
||||||
|
usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
return _finalize_task(
|
||||||
|
task_id, user_id, "extract-pages", original_filename,
|
||||||
|
{"status": "failed", "error": "An unexpected error occurred."},
|
||||||
|
usage_source, api_key_id, self.request.id,
|
||||||
|
)
|
||||||
|
|||||||
88
backend/app/tasks/qrcode_tasks.py
Normal file
88
backend/app/tasks/qrcode_tasks.py
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
"""Celery tasks for QR code generation."""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from flask import current_app
|
||||||
|
|
||||||
|
from app.extensions import celery
|
||||||
|
from app.services.qrcode_service import generate_qr_code, QRCodeError
|
||||||
|
from app.services.storage_service import storage
|
||||||
|
from app.services.task_tracking_service import finalize_task_tracking
|
||||||
|
from app.utils.sanitizer import cleanup_task_files
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup(task_id: str):
|
||||||
|
cleanup_task_files(task_id, keep_outputs=not storage.use_s3)
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True, name="app.tasks.qrcode_tasks.generate_qr_task")
|
||||||
|
def generate_qr_task(
|
||||||
|
self,
|
||||||
|
task_id: str,
|
||||||
|
data: str,
|
||||||
|
size: int = 300,
|
||||||
|
output_format: str = "png",
|
||||||
|
user_id: int | None = None,
|
||||||
|
usage_source: str = "web",
|
||||||
|
api_key_id: int | None = None,
|
||||||
|
):
|
||||||
|
"""Generate a QR code image."""
|
||||||
|
output_dir = os.path.join(current_app.config["OUTPUT_FOLDER"], task_id)
|
||||||
|
os.makedirs(output_dir, exist_ok=True)
|
||||||
|
output_path = os.path.join(output_dir, f"{task_id}.{output_format}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Generating QR code..."})
|
||||||
|
|
||||||
|
stats = generate_qr_code(data, output_path, size, output_format)
|
||||||
|
|
||||||
|
self.update_state(state="PROCESSING", meta={"step": "Uploading result..."})
|
||||||
|
s3_key = storage.upload_file(output_path, task_id, folder="outputs")
|
||||||
|
|
||||||
|
download_name = f"qrcode.{output_format}"
|
||||||
|
download_url = storage.generate_presigned_url(s3_key, original_filename=download_name)
|
||||||
|
|
||||||
|
result = {
|
||||||
|
"status": "completed",
|
||||||
|
"download_url": download_url,
|
||||||
|
"filename": download_name,
|
||||||
|
"output_size": stats["output_size"],
|
||||||
|
"width": stats["width"],
|
||||||
|
"height": stats["height"],
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Task {task_id}: QR code generated")
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="qr-code",
|
||||||
|
original_filename="qrcode", result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except QRCodeError as e:
|
||||||
|
logger.error(f"Task {task_id}: {e}")
|
||||||
|
result = {"status": "failed", "error": str(e)}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="qr-code",
|
||||||
|
original_filename="qrcode", result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Task {task_id}: Unexpected error — {e}")
|
||||||
|
result = {"status": "failed", "error": "An unexpected error occurred."}
|
||||||
|
finalize_task_tracking(
|
||||||
|
user_id=user_id, tool="qr-code",
|
||||||
|
original_filename="qrcode", result=result,
|
||||||
|
usage_source=usage_source, api_key_id=api_key_id,
|
||||||
|
celery_task_id=self.request.id,
|
||||||
|
)
|
||||||
|
_cleanup(task_id)
|
||||||
|
return result
|
||||||
@@ -15,3 +15,8 @@ import app.tasks.maintenance_tasks # noqa: F401
|
|||||||
import app.tasks.ocr_tasks # noqa: F401
|
import app.tasks.ocr_tasks # noqa: F401
|
||||||
import app.tasks.removebg_tasks # noqa: F401
|
import app.tasks.removebg_tasks # noqa: F401
|
||||||
import app.tasks.pdf_editor_tasks # noqa: F401
|
import app.tasks.pdf_editor_tasks # noqa: F401
|
||||||
|
import app.tasks.compress_image_tasks # noqa: F401
|
||||||
|
import app.tasks.pdf_to_excel_tasks # noqa: F401
|
||||||
|
import app.tasks.qrcode_tasks # noqa: F401
|
||||||
|
import app.tasks.html_to_pdf_tasks # noqa: F401
|
||||||
|
import app.tasks.pdf_ai_tasks # noqa: F401
|
||||||
|
|||||||
@@ -21,6 +21,16 @@ PyPDF2>=3.0,<4.0
|
|||||||
reportlab>=4.0,<5.0
|
reportlab>=4.0,<5.0
|
||||||
pdf2image>=1.16,<2.0
|
pdf2image>=1.16,<2.0
|
||||||
|
|
||||||
|
# PDF to Excel / Table extraction
|
||||||
|
tabula-py>=2.9,<3.0
|
||||||
|
openpyxl>=3.1,<4.0
|
||||||
|
|
||||||
|
# QR Code
|
||||||
|
qrcode[pil]>=7.4,<8.0
|
||||||
|
|
||||||
|
# HTML to PDF
|
||||||
|
weasyprint>=60.0,<62.0
|
||||||
|
|
||||||
# OCR
|
# OCR
|
||||||
pytesseract>=0.3.10,<1.0
|
pytesseract>=0.3.10,<1.0
|
||||||
|
|
||||||
|
|||||||
10
backend/test_output.txt
Normal file
10
backend/test_output.txt
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
........................................................................ [ 34%]
|
||||||
|
........................................................................ [ 69%]
|
||||||
|
................................................................ [100%]
|
||||||
|
============================== warnings summary ===============================
|
||||||
|
tests/test_pdf_tools_service.py::TestMergePdfsService::test_merge_file_not_found_raises
|
||||||
|
C:\Users\ahmed\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\PyPDF2\__init__.py:21: DeprecationWarning: PyPDF2 is deprecated. Please move to the pypdf library instead.
|
||||||
|
warnings.warn(
|
||||||
|
|
||||||
|
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
|
||||||
|
208 passed, 1 warning in 66.10s (0:01:06)
|
||||||
0
backend/test_results.txt
Normal file
0
backend/test_results.txt
Normal file
78
backend/tests/test_compress_image.py
Normal file
78
backend/tests/test_compress_image.py
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
"""Tests for Compress Image endpoint — POST /api/image/compress."""
|
||||||
|
import io
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
|
||||||
|
class TestCompressImage:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/image/compress')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid image upload."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = 'compress-img-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.compress_image.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.png', 'png'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.compress_image.generate_safe_path',
|
||||||
|
lambda ext, folder_type: ('compress-img-task-id', '/tmp/mock.png'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.compress_image.compress_image_task.delay',
|
||||||
|
MagicMock(return_value=mock_task),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'werkzeug.datastructures.file_storage.FileStorage.save',
|
||||||
|
lambda self, dst, buffer_size=16384: None,
|
||||||
|
)
|
||||||
|
|
||||||
|
from tests.conftest import make_png_bytes
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(make_png_bytes()), 'test.png'),
|
||||||
|
'quality': '75',
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/image/compress',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
json_data = response.get_json()
|
||||||
|
assert 'task_id' in json_data
|
||||||
|
|
||||||
|
def test_invalid_quality(self, client, monkeypatch):
|
||||||
|
"""Should clamp quality and still work."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = 'compress-q-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.compress_image.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.jpg', 'jpg'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.compress_image.generate_safe_path',
|
||||||
|
lambda ext, folder_type: ('compress-q-task-id', '/tmp/mock.jpg'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.compress_image.compress_image_task.delay',
|
||||||
|
MagicMock(return_value=mock_task),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'werkzeug.datastructures.file_storage.FileStorage.save',
|
||||||
|
lambda self, dst, buffer_size=16384: None,
|
||||||
|
)
|
||||||
|
|
||||||
|
from tests.conftest import make_jpeg_bytes
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(make_jpeg_bytes()), 'test.jpg'),
|
||||||
|
'quality': '200', # should be clamped
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/image/compress',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
43
backend/tests/test_html_to_pdf.py
Normal file
43
backend/tests/test_html_to_pdf.py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
"""Tests for HTML to PDF endpoint — POST /api/convert/html-to-pdf."""
|
||||||
|
import io
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
|
||||||
|
class TestHtmlToPdf:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/convert/html-to-pdf')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid HTML upload."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = 'html-pdf-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.html_to_pdf.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.html', 'html'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.html_to_pdf.generate_safe_path',
|
||||||
|
lambda ext, folder_type: ('html-pdf-task-id', '/tmp/mock.html'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.html_to_pdf.html_to_pdf_task.delay',
|
||||||
|
MagicMock(return_value=mock_task),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'werkzeug.datastructures.file_storage.FileStorage.save',
|
||||||
|
lambda self, dst, buffer_size=16384: None,
|
||||||
|
)
|
||||||
|
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(b'<html><body>Hello</body></html>'), 'test.html'),
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/convert/html-to-pdf',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
json_data = response.get_json()
|
||||||
|
assert 'task_id' in json_data
|
||||||
134
backend/tests/test_pdf_ai.py
Normal file
134
backend/tests/test_pdf_ai.py
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
"""Tests for PDF AI endpoints — Chat, Summarize, Translate, Extract Tables."""
|
||||||
|
import io
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
|
||||||
|
def _mock_pdf_ai(monkeypatch, task_name):
|
||||||
|
"""Helper to mock validate, path gen, and celery task for pdf_ai routes."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = f'{task_name}-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_ai.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.pdf', 'pdf'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_ai.generate_safe_path',
|
||||||
|
lambda ext, folder_type: (f'{task_name}-task-id', '/tmp/mock.pdf'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
f'app.routes.pdf_ai.{task_name}.delay',
|
||||||
|
MagicMock(return_value=mock_task),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'werkzeug.datastructures.file_storage.FileStorage.save',
|
||||||
|
lambda self, dst, buffer_size=16384: None,
|
||||||
|
)
|
||||||
|
return mock_task
|
||||||
|
|
||||||
|
|
||||||
|
class TestChatPdf:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-ai/chat')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_no_question(self, client, monkeypatch):
|
||||||
|
"""Should return 400 when no question provided."""
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_ai.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.pdf', 'pdf'),
|
||||||
|
)
|
||||||
|
from tests.conftest import make_pdf_bytes
|
||||||
|
data = {'file': (io.BytesIO(make_pdf_bytes()), 'test.pdf')}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-ai/chat',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid request."""
|
||||||
|
_mock_pdf_ai(monkeypatch, 'chat_with_pdf_task')
|
||||||
|
|
||||||
|
from tests.conftest import make_pdf_bytes
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(make_pdf_bytes()), 'test.pdf'),
|
||||||
|
'question': 'What is this about?',
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-ai/chat',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
assert 'task_id' in response.get_json()
|
||||||
|
|
||||||
|
|
||||||
|
class TestSummarizePdf:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-ai/summarize')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid request."""
|
||||||
|
_mock_pdf_ai(monkeypatch, 'summarize_pdf_task')
|
||||||
|
|
||||||
|
from tests.conftest import make_pdf_bytes
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(make_pdf_bytes()), 'test.pdf'),
|
||||||
|
'length': 'short',
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-ai/summarize',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
assert 'task_id' in response.get_json()
|
||||||
|
|
||||||
|
|
||||||
|
class TestTranslatePdf:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-ai/translate')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid request."""
|
||||||
|
_mock_pdf_ai(monkeypatch, 'translate_pdf_task')
|
||||||
|
|
||||||
|
from tests.conftest import make_pdf_bytes
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(make_pdf_bytes()), 'test.pdf'),
|
||||||
|
'target_language': 'fr',
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-ai/translate',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
assert 'task_id' in response.get_json()
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtractTables:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-ai/extract-tables')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid request."""
|
||||||
|
_mock_pdf_ai(monkeypatch, 'extract_tables_task')
|
||||||
|
|
||||||
|
from tests.conftest import make_pdf_bytes
|
||||||
|
data = {'file': (io.BytesIO(make_pdf_bytes()), 'test.pdf')}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-ai/extract-tables',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
assert 'task_id' in response.get_json()
|
||||||
42
backend/tests/test_pdf_to_excel.py
Normal file
42
backend/tests/test_pdf_to_excel.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
"""Tests for PDF to Excel endpoint — POST /api/convert/pdf-to-excel."""
|
||||||
|
import io
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
|
||||||
|
class TestPdfToExcel:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/convert/pdf-to-excel')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid PDF upload."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = 'pdf-excel-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_to_excel.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.pdf', 'pdf'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_to_excel.generate_safe_path',
|
||||||
|
lambda ext, folder_type: ('pdf-excel-task-id', '/tmp/mock.pdf'),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_to_excel.pdf_to_excel_task.delay',
|
||||||
|
MagicMock(return_value=mock_task),
|
||||||
|
)
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'werkzeug.datastructures.file_storage.FileStorage.save',
|
||||||
|
lambda self, dst, buffer_size=16384: None,
|
||||||
|
)
|
||||||
|
|
||||||
|
from tests.conftest import make_pdf_bytes
|
||||||
|
data = {'file': (io.BytesIO(make_pdf_bytes()), 'test.pdf')}
|
||||||
|
response = client.post(
|
||||||
|
'/api/convert/pdf-to-excel',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
json_data = response.get_json()
|
||||||
|
assert 'task_id' in json_data
|
||||||
@@ -528,4 +528,107 @@ class TestUnlockPdf:
|
|||||||
data=data,
|
data=data,
|
||||||
content_type='multipart/form-data',
|
content_type='multipart/form-data',
|
||||||
)
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# 9. Remove Watermark — POST /api/pdf-tools/remove-watermark
|
||||||
|
# =========================================================================
|
||||||
|
class TestRemoveWatermark:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-tools/remove-watermark')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid PDF."""
|
||||||
|
_mock_validate_and_task(
|
||||||
|
monkeypatch, 'app.routes.pdf_tools', 'remove_watermark_task'
|
||||||
|
)
|
||||||
|
data = {'file': (io.BytesIO(b'%PDF-1.4'), 'test.pdf')}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-tools/remove-watermark',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# 10. Reorder PDF — POST /api/pdf-tools/reorder
|
||||||
|
# =========================================================================
|
||||||
|
class TestReorderPdf:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-tools/reorder')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_no_page_order(self, client, monkeypatch):
|
||||||
|
"""Should return 400 when no page_order provided."""
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_tools.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.pdf', 'pdf'),
|
||||||
|
)
|
||||||
|
data = {'file': (io.BytesIO(b'%PDF-1.4'), 'test.pdf')}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-tools/reorder',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid request."""
|
||||||
|
_mock_validate_and_task(
|
||||||
|
monkeypatch, 'app.routes.pdf_tools', 'reorder_pdf_task'
|
||||||
|
)
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(b'%PDF-1.4'), 'test.pdf'),
|
||||||
|
'page_order': '3,1,2',
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-tools/reorder',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# 11. Extract Pages — POST /api/pdf-tools/extract-pages
|
||||||
|
# =========================================================================
|
||||||
|
class TestExtractPages:
|
||||||
|
def test_no_file(self, client):
|
||||||
|
"""Should return 400 when no file provided."""
|
||||||
|
response = client.post('/api/pdf-tools/extract-pages')
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_no_pages(self, client, monkeypatch):
|
||||||
|
"""Should return 400 when no pages param provided."""
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.pdf_tools.validate_actor_file',
|
||||||
|
lambda f, allowed_types, actor: ('test.pdf', 'pdf'),
|
||||||
|
)
|
||||||
|
data = {'file': (io.BytesIO(b'%PDF-1.4'), 'test.pdf')}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-tools/extract-pages',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid request."""
|
||||||
|
_mock_validate_and_task(
|
||||||
|
monkeypatch, 'app.routes.pdf_tools', 'extract_pages_task'
|
||||||
|
)
|
||||||
|
data = {
|
||||||
|
'file': (io.BytesIO(b'%PDF-1.4'), 'test.pdf'),
|
||||||
|
'pages': '1,3,5-8',
|
||||||
|
}
|
||||||
|
response = client.post(
|
||||||
|
'/api/pdf-tools/extract-pages',
|
||||||
|
data=data,
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
assert response.status_code == 202
|
assert response.status_code == 202
|
||||||
57
backend/tests/test_qrcode.py
Normal file
57
backend/tests/test_qrcode.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
"""Tests for QR Code Generator endpoint — POST /api/qrcode/generate."""
|
||||||
|
import json
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
|
||||||
|
class TestQrCodeGenerator:
|
||||||
|
def test_no_data(self, client):
|
||||||
|
"""Should return 400 when no data provided."""
|
||||||
|
response = client.post(
|
||||||
|
'/api/qrcode/generate',
|
||||||
|
data=json.dumps({}),
|
||||||
|
content_type='application/json',
|
||||||
|
)
|
||||||
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
def test_success_json(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid JSON request."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = 'qr-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.qrcode.generate_qr_task',
|
||||||
|
MagicMock(delay=MagicMock(return_value=mock_task)),
|
||||||
|
)
|
||||||
|
|
||||||
|
response = client.post(
|
||||||
|
'/api/qrcode/generate',
|
||||||
|
data=json.dumps({'data': 'https://example.com', 'size': 300}),
|
||||||
|
content_type='application/json',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
json_data = response.get_json()
|
||||||
|
assert 'task_id' in json_data
|
||||||
|
|
||||||
|
def test_success_form_data(self, client, monkeypatch):
|
||||||
|
"""Should return 202 with task_id on valid form-data request."""
|
||||||
|
mock_task = MagicMock()
|
||||||
|
mock_task.id = 'qr-form-task-id'
|
||||||
|
monkeypatch.setattr(
|
||||||
|
'app.routes.qrcode.generate_qr_task',
|
||||||
|
MagicMock(delay=MagicMock(return_value=mock_task)),
|
||||||
|
)
|
||||||
|
|
||||||
|
response = client.post(
|
||||||
|
'/api/qrcode/generate',
|
||||||
|
data={'data': 'Hello World'},
|
||||||
|
content_type='multipart/form-data',
|
||||||
|
)
|
||||||
|
assert response.status_code == 202
|
||||||
|
|
||||||
|
def test_empty_data(self, client):
|
||||||
|
"""Should return 400 when data field is empty string."""
|
||||||
|
response = client.post(
|
||||||
|
'/api/qrcode/generate',
|
||||||
|
data=json.dumps({'data': ''}),
|
||||||
|
content_type='application/json',
|
||||||
|
)
|
||||||
|
assert response.status_code == 400
|
||||||
274
docs/tool_inventory.md
Normal file
274
docs/tool_inventory.md
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
# SaaS-PDF — Tool Inventory & Competitive Gap Analysis
|
||||||
|
|
||||||
|
> Generated: March 7, 2026
|
||||||
|
> Branch: `feature/critical-maintenance-and-editor`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Platform Infrastructure
|
||||||
|
|
||||||
|
| Component | Technology | Status |
|
||||||
|
|---|---|---|
|
||||||
|
| Backend | Flask + Gunicorn | ✅ Production-ready |
|
||||||
|
| Frontend | React + Vite + TypeScript + Tailwind | ✅ Production-ready |
|
||||||
|
| Task Queue | Celery + Redis | ✅ 3 queues (default, image, pdf_tools) |
|
||||||
|
| Scheduler | Celery Beat | ✅ Expired-file cleanup every 30 min |
|
||||||
|
| Database | SQLite | ✅ Users, API keys, history, usage events |
|
||||||
|
| Storage | Local + S3 (optional) | ✅ Presigned URLs |
|
||||||
|
| Auth | Session-based + API Key (B2B) | ✅ Free & Pro plans |
|
||||||
|
| Security | Talisman CSP, rate limiting, CORS, input sanitization | ✅ |
|
||||||
|
| i18n | react-i18next (en, ar, fr) | ✅ All tools translated |
|
||||||
|
| Monetization | Google AdSense slots | ✅ Integrated |
|
||||||
|
| Email | SMTP (password reset) | ✅ |
|
||||||
|
| Docker | docker-compose (dev + prod) | ✅ |
|
||||||
|
| Nginx | Reverse proxy + SSL | ✅ |
|
||||||
|
|
||||||
|
### Plans & Quotas
|
||||||
|
|
||||||
|
| | Free | Pro |
|
||||||
|
|---|---|---|
|
||||||
|
| Web requests/month | 50 | 500 |
|
||||||
|
| API requests/month | — | 1,000 |
|
||||||
|
| Max file size | 50 MB | 100 MB |
|
||||||
|
| History retention | 25 | 250 |
|
||||||
|
| API key access | ❌ | ✅ |
|
||||||
|
|
||||||
|
### Registered Blueprints: 18
|
||||||
|
|
||||||
|
| Blueprint | Prefix | Purpose |
|
||||||
|
|---|---|---|
|
||||||
|
| `health_bp` | `/api` | Health check |
|
||||||
|
| `auth_bp` | `/api/auth` | Login, register, forgot/reset password |
|
||||||
|
| `account_bp` | `/api/account` | Profile, API keys, usage |
|
||||||
|
| `admin_bp` | `/api/internal/admin` | Plan management |
|
||||||
|
| `convert_bp` | `/api/convert` | PDF ↔ Word |
|
||||||
|
| `compress_bp` | `/api/compress` | PDF compression |
|
||||||
|
| `image_bp` | `/api/image` | Image convert & resize |
|
||||||
|
| `video_bp` | `/api/video` | Video to GIF |
|
||||||
|
| `history_bp` | `/api` | User history |
|
||||||
|
| `pdf_tools_bp` | `/api/pdf-tools` | Merge, split, rotate, watermark, etc. |
|
||||||
|
| `flowchart_bp` | `/api/flowchart` | AI flowchart extraction |
|
||||||
|
| `tasks_bp` | `/api/tasks` | Task status polling |
|
||||||
|
| `download_bp` | `/api/download` | Secure file download |
|
||||||
|
| `v1_bp` | `/api/v1` | B2B API (all tools) |
|
||||||
|
| `config_bp` | `/api/config` | Dynamic limits |
|
||||||
|
| `ocr_bp` | `/api/ocr` | OCR text extraction |
|
||||||
|
| `removebg_bp` | `/api/remove-bg` | Background removal |
|
||||||
|
| `pdf_editor_bp` | `/api/pdf-editor` | PDF text annotations |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Existing Tools — Complete Inventory (21 tools)
|
||||||
|
|
||||||
|
### 2.1 PDF Tools (14)
|
||||||
|
|
||||||
|
| # | Tool | Endpoint | Service | Task | Component | Route | i18n | B2B API |
|
||||||
|
|---|---|---|---|---|---|---|---|---|
|
||||||
|
| 1 | **Compress PDF** | `POST /api/compress/pdf` | `compress_service` | `compress_pdf_task` | `PdfCompressor.tsx` | `/tools/compress-pdf` | ✅ | ✅ |
|
||||||
|
| 2 | **PDF to Word** | `POST /api/convert/pdf-to-word` | `pdf_service` | `convert_pdf_to_word` | `PdfToWord.tsx` | `/tools/pdf-to-word` | ✅ | ✅ |
|
||||||
|
| 3 | **Word to PDF** | `POST /api/convert/word-to-pdf` | `pdf_service` | `convert_word_to_pdf` | `WordToPdf.tsx` | `/tools/word-to-pdf` | ✅ | ✅ |
|
||||||
|
| 4 | **Merge PDF** | `POST /api/pdf-tools/merge` | `pdf_tools_service` | `merge_pdfs_task` | `MergePdf.tsx` | `/tools/merge-pdf` | ✅ | ✅ |
|
||||||
|
| 5 | **Split PDF** | `POST /api/pdf-tools/split` | `pdf_tools_service` | `split_pdf_task` | `SplitPdf.tsx` | `/tools/split-pdf` | ✅ | ✅ |
|
||||||
|
| 6 | **Rotate PDF** | `POST /api/pdf-tools/rotate` | `pdf_tools_service` | `rotate_pdf_task` | `RotatePdf.tsx` | `/tools/rotate-pdf` | ✅ | ✅ |
|
||||||
|
| 7 | **PDF to Images** | `POST /api/pdf-tools/pdf-to-images` | `pdf_tools_service` | `pdf_to_images_task` | `PdfToImages.tsx` | `/tools/pdf-to-images` | ✅ | ✅ |
|
||||||
|
| 8 | **Images to PDF** | `POST /api/pdf-tools/images-to-pdf` | `pdf_tools_service` | `images_to_pdf_task` | `ImagesToPdf.tsx` | `/tools/images-to-pdf` | ✅ | ✅ |
|
||||||
|
| 9 | **Watermark PDF** | `POST /api/pdf-tools/watermark` | `pdf_tools_service` | `watermark_pdf_task` | `WatermarkPdf.tsx` | `/tools/watermark-pdf` | ✅ | ✅ |
|
||||||
|
| 10 | **Protect PDF** | `POST /api/pdf-tools/protect` | `pdf_tools_service` | `protect_pdf_task` | `ProtectPdf.tsx` | `/tools/protect-pdf` | ✅ | ✅ |
|
||||||
|
| 11 | **Unlock PDF** | `POST /api/pdf-tools/unlock` | `pdf_tools_service` | `unlock_pdf_task` | `UnlockPdf.tsx` | `/tools/unlock-pdf` | ✅ | ✅ |
|
||||||
|
| 12 | **Add Page Numbers** | `POST /api/pdf-tools/page-numbers` | `pdf_tools_service` | `add_page_numbers_task` | `AddPageNumbers.tsx` | `/tools/page-numbers` | ✅ | ✅ |
|
||||||
|
| 13 | **PDF Editor** | `POST /api/pdf-editor/edit` | `pdf_editor_service` | `edit_pdf_task` | `PdfEditor.tsx` | `/tools/pdf-editor` | ✅ | ❌ |
|
||||||
|
| 14 | **PDF Flowchart** | `POST /api/flowchart/extract` + 3 | `flowchart_service` | `extract_flowchart_task` | `PdfFlowchart.tsx` | `/tools/pdf-flowchart` | ✅ | ✅ |
|
||||||
|
|
||||||
|
### 2.2 Image Tools (4)
|
||||||
|
|
||||||
|
| # | Tool | Endpoint | Service | Task | Component | Route | i18n | B2B API |
|
||||||
|
|---|---|---|---|---|---|---|---|---|
|
||||||
|
| 15 | **Image Converter** | `POST /api/image/convert` | `image_service` | `convert_image_task` | `ImageConverter.tsx` | `/tools/image-converter` | ✅ | ✅ |
|
||||||
|
| 16 | **Image Resize** | `POST /api/image/resize` | `image_service` | `resize_image_task` | `ImageResize.tsx` | `/tools/image-resize` | ✅ | ✅ |
|
||||||
|
| 17 | **OCR** | `POST /api/ocr/image` + `/pdf` | `ocr_service` | `ocr_image_task` / `ocr_pdf_task` | `OcrTool.tsx` | `/tools/ocr` | ✅ | ❌ |
|
||||||
|
| 18 | **Remove Background** | `POST /api/remove-bg` | `removebg_service` | `remove_bg_task` | `RemoveBackground.tsx` | `/tools/remove-background` | ✅ | ❌ |
|
||||||
|
|
||||||
|
### 2.3 Video Tools (1)
|
||||||
|
|
||||||
|
| # | Tool | Endpoint | Service | Task | Component | Route | i18n | B2B API |
|
||||||
|
|---|---|---|---|---|---|---|---|---|
|
||||||
|
| 19 | **Video to GIF** | `POST /api/video/to-gif` | `video_service` | `create_gif_task` | `VideoToGif.tsx` | `/tools/video-to-gif` | ✅ | ✅ |
|
||||||
|
|
||||||
|
### 2.4 Text Tools — Client-Side Only (2)
|
||||||
|
|
||||||
|
| # | Tool | Backend | Component | Route | i18n |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| 20 | **Word Counter** | None (JS) | `WordCounter.tsx` | `/tools/word-counter` | ✅ |
|
||||||
|
| 21 | **Text Cleaner** | None (JS) | `TextCleaner.tsx` | `/tools/text-cleaner` | ✅ |
|
||||||
|
|
||||||
|
### Feature Flags
|
||||||
|
|
||||||
|
| Flag | Default | Controls |
|
||||||
|
|---|---|---|
|
||||||
|
| `FEATURE_EDITOR` | `false` | OCR, Remove Background, PDF Editor routes (403 when off) |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Test Coverage
|
||||||
|
|
||||||
|
| Category | Test Files | Tests |
|
||||||
|
|---|---|---|
|
||||||
|
| Auth | `test_auth.py` | 5 |
|
||||||
|
| Config | `test_config.py` | 3 |
|
||||||
|
| Password reset | `test_password_reset.py` | 8 |
|
||||||
|
| Maintenance | `test_maintenance_tasks.py` | 8 |
|
||||||
|
| Compress | `test_compress.py`, `test_compress_service.py`, `test_compress_tasks.py` | 6 |
|
||||||
|
| Convert | `test_convert.py`, `test_convert_tasks.py` | 6 |
|
||||||
|
| Image | `test_image.py`, `test_image_service.py`, `test_image_tasks.py` | ~18 |
|
||||||
|
| Video | `test_video.py`, `test_video_service.py`, `test_video_tasks.py` | ~12 |
|
||||||
|
| PDF tools | `test_pdf_tools.py`, `test_pdf_tools_service.py`, `test_pdf_tools_tasks.py` | ~50 |
|
||||||
|
| Flowchart | `test_flowchart_tasks.py` | ~6 |
|
||||||
|
| OCR | `test_ocr.py`, `test_ocr_service.py` | 12 |
|
||||||
|
| Remove BG | `test_removebg.py` | 3 |
|
||||||
|
| PDF Editor | `test_pdf_editor.py` | 7 |
|
||||||
|
| Infra | `test_download.py`, `test_health.py`, `test_history.py`, `test_rate_limiter.py`, `test_sanitizer.py`, `test_storage_service.py`, `test_file_validator.py`, `test_utils.py`, `test_tasks_route.py` | ~36 |
|
||||||
|
| **TOTAL** | **30 files** | **180 ✅** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Missing Tools — Competitive Gap Analysis
|
||||||
|
|
||||||
|
Comparison against: iLovePDF, SmallPDF, TinyWow, PDF24, Adobe Acrobat Online.
|
||||||
|
|
||||||
|
### 4.1 HIGH PRIORITY — Core tools competitors all have
|
||||||
|
|
||||||
|
| # | Tool | Category | Complexity | Dependencies | Notes |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| 1 | **Compress Image** | Image | Low | Pillow (exists) | JPEG/PNG/WebP quality reduction + resize. Pillow already installed. |
|
||||||
|
| 2 | **PDF to Excel** | PDF → Office | Medium | `camelot-py` or `tabula-py` | Table extraction from PDFs — high user demand. |
|
||||||
|
| 3 | **PDF to PowerPoint** | PDF → Office | Medium | `python-pptx` | Convert PDF pages to PPTX slides (images per slide or OCR). |
|
||||||
|
| 4 | **Excel to PDF** | Office → PDF | Medium | LibreOffice CLI | Same pattern as Word to PDF. |
|
||||||
|
| 5 | **PowerPoint to PDF** | Office → PDF | Medium | LibreOffice CLI | Same pattern as Word to PDF. |
|
||||||
|
| 6 | **HTML to PDF** | Web → PDF | Low | `weasyprint` or `playwright` | Input URL or HTML snippet → PDF. |
|
||||||
|
| 7 | **Reorder / Rearrange Pages** | PDF | Low | PyPDF2 (exists) | Drag-and-drop page reorder UI → backend rebuilds PDF. |
|
||||||
|
| 8 | **Extract Pages** | PDF | Low | PyPDF2 (exists) | Similar to Split but with visual page picker. Already partially covered by Split tool. |
|
||||||
|
| 9 | **Sign PDF** | PDF | Medium | ReportLab + canvas | Draw/upload signature → overlay onto PDF page. |
|
||||||
|
| 10 | **PDF Repair** | PDF | Low | PyPDF2 (exists) | Read → rewrite to fix broken xref tables. |
|
||||||
|
|
||||||
|
### 4.2 MEDIUM PRIORITY — Differentiators present on 2–3 competitors
|
||||||
|
|
||||||
|
| # | Tool | Category | Complexity | Dependencies | Notes |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| 11 | **PDF to PDF/A** | PDF | Medium | Ghostscript (exists) | Archival format conversion. |
|
||||||
|
| 12 | **Flatten PDF** | PDF | Low | PyPDF2 (exists) | Remove form fields / annotations → flat page. |
|
||||||
|
| 13 | **Crop PDF** | PDF | Medium | PyPDF2 (exists) | Crop margins / adjust page boundaries. |
|
||||||
|
| 14 | **Compare PDFs** | PDF | High | `diff-match-patch` + PyPDF2 | Side-by-side visual diff of two documents. |
|
||||||
|
| 15 | **QR Code Generator** | Utility | Low | `qrcode` + Pillow | Text/URL → QR image. Client-side possible but backend for API. |
|
||||||
|
| 16 | **Barcode Generator** | Utility | Low | `python-barcode` | Generate Code128, EAN, UPC barcodes. |
|
||||||
|
| 17 | **Image Crop** | Image | Low | Pillow (exists) | Visual cropping UI → backend Pillow crop. |
|
||||||
|
| 18 | **Image Rotate / Flip** | Image | Low | Pillow (exists) | 90°/180°/270° + horizontal/vertical flip. |
|
||||||
|
| 19 | **Image Filters** | Image | Low | Pillow (exists) | Grayscale, sepia, blur, sharpen, brightness, contrast. |
|
||||||
|
|
||||||
|
### 4.3 LOW PRIORITY — Advanced / niche (1–2 competitors, premium features)
|
||||||
|
|
||||||
|
| # | Tool | Category | Complexity | Dependencies | Notes |
|
||||||
|
|---|---|---|---|---|---|
|
||||||
|
| 20 | **AI Chat with PDF** | AI | High | OpenRouter (exists) | Upload PDF → ask questions. Flowchart service has partial foundation. |
|
||||||
|
| 21 | **AI PDF Summarizer** | AI | Medium | OpenRouter (exists) | Extract text → prompt LLM for summary. |
|
||||||
|
| 22 | **AI PDF Translator** | AI | Medium | OpenRouter (exists) | Extract text → translate via LLM → overlay or return translated doc. |
|
||||||
|
| 23 | **PDF Form Filler** | PDF | High | ReportLab + PyPDF2 | Detect form fields → UI to fill → save. |
|
||||||
|
| 24 | **Redact PDF** | PDF | Medium | ReportLab + PyPDF2 | Blackout sensitive text regions. |
|
||||||
|
| 25 | **PDF Metadata Editor** | PDF | Low | PyPDF2 (exists) | Edit title, author, subject, keywords. |
|
||||||
|
| 26 | **eSign / Digital Signature** | PDF | High | `cryptography` + PKCS#7 | Cryptographic digital signatures (different from visual sign). |
|
||||||
|
| 27 | **Batch Processing** | All | Medium | Existing tasks | Upload multiple files → apply same operation to all. |
|
||||||
|
| 28 | **GIF to Video** | Video | Medium | ffmpeg (exists) | Reverse of Video to GIF. |
|
||||||
|
| 29 | **Video Compress** | Video | Medium | ffmpeg (exists) | Reduce video file size. |
|
||||||
|
| 30 | **Audio Extract** | Video | Low | ffmpeg (exists) | Extract audio track from video → MP3/WAV. |
|
||||||
|
| 31 | **Screenshot to PDF** | Utility | Low | Pillow (exists) | Paste screenshot → generate PDF (similar to Images to PDF). |
|
||||||
|
| 32 | **Markdown to PDF** | Utility | Low | `markdown` + WeasyPrint | Render Markdown → PDF. |
|
||||||
|
| 33 | **JSON / CSV Viewer** | Utility | Low | Client-side | Pretty-print structured data. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Implementation Readiness Matrix
|
||||||
|
|
||||||
|
Tools grouped by effort required (backend dependencies already present in the project):
|
||||||
|
|
||||||
|
### Ready to build (dependencies exist: PyPDF2, Pillow, Ghostscript, ffmpeg)
|
||||||
|
|
||||||
|
| Tool | Effort | Reuses |
|
||||||
|
|---|---|---|
|
||||||
|
| Compress Image | ~2h | `image_service.py` + Pillow |
|
||||||
|
| Reorder Pages | ~3h | `pdf_tools_service.py` + PyPDF2 |
|
||||||
|
| Extract Pages | ~2h | Split tool pattern |
|
||||||
|
| PDF Repair | ~2h | PyPDF2 read/write |
|
||||||
|
| Flatten PDF | ~2h | PyPDF2 |
|
||||||
|
| Crop PDF | ~3h | PyPDF2 MediaBox |
|
||||||
|
| Image Crop | ~2h | Pillow |
|
||||||
|
| Image Rotate/Flip | ~2h | Pillow |
|
||||||
|
| Image Filters | ~3h | Pillow ImageFilter |
|
||||||
|
| PDF Metadata Editor | ~2h | PyPDF2 |
|
||||||
|
| PDF to PDF/A | ~2h | Ghostscript (exists in Dockerfile) |
|
||||||
|
| QR Code Generator | ~2h | `qrcode` pip package |
|
||||||
|
| AI PDF Summarizer | ~3h | `ai_chat_service.py` + OpenRouter |
|
||||||
|
| GIF to Video | ~2h | ffmpeg |
|
||||||
|
| Audio Extract | ~2h | ffmpeg |
|
||||||
|
|
||||||
|
### Need new dependencies (1 pip package)
|
||||||
|
|
||||||
|
| Tool | New Dependency | Effort |
|
||||||
|
|---|---|---|
|
||||||
|
| PDF to Excel | `camelot-py[cv]` or `tabula-py` | ~4h |
|
||||||
|
| PDF to PowerPoint | `python-pptx` | ~4h |
|
||||||
|
| Excel to PDF | LibreOffice CLI (exists) | ~3h |
|
||||||
|
| PowerPoint to PDF | LibreOffice CLI (exists) | ~3h |
|
||||||
|
| HTML to PDF | `weasyprint` or `playwright` | ~4h |
|
||||||
|
| Sign PDF | ReportLab (exists) + canvas overlay | ~6h |
|
||||||
|
| Barcode Generator | `python-barcode` | ~2h |
|
||||||
|
| Markdown to PDF | `markdown` + `weasyprint` | ~3h |
|
||||||
|
|
||||||
|
### Requires significant new architecture
|
||||||
|
|
||||||
|
| Tool | Complexity | Effort |
|
||||||
|
|---|---|---|
|
||||||
|
| AI Chat with PDF | RAG pipeline or full-doc prompt | ~8h |
|
||||||
|
| AI PDF Translator | OCR + LLM + overlay | ~8h |
|
||||||
|
| PDF Form Filler | Field detection + fill engine | ~10h |
|
||||||
|
| Redact PDF | Region detection + blackout overlay | ~6h |
|
||||||
|
| Compare PDFs | Diff algorithm + visual rendering | ~10h |
|
||||||
|
| eSign / Digital Signature | PKCS#7 cryptographic signing | ~10h |
|
||||||
|
| Batch Processing | Queue orchestration for multi-file | ~6h |
|
||||||
|
| Video Compress | ffmpeg transcoding | ~4h |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Summary
|
||||||
|
|
||||||
|
| Metric | Count |
|
||||||
|
|---|---|
|
||||||
|
| **Existing tools** | 21 |
|
||||||
|
| **Missing HIGH priority** | 10 |
|
||||||
|
| **Missing MEDIUM priority** | 9 |
|
||||||
|
| **Missing LOW priority** | 14 |
|
||||||
|
| **Total gap** | 33 |
|
||||||
|
| **Backend tests** | 180 ✅ |
|
||||||
|
| **Frontend build** | ✅ Clean |
|
||||||
|
| **Blueprints** | 18 |
|
||||||
|
| **Celery task modules** | 10 |
|
||||||
|
| **Service files** | 15 |
|
||||||
|
| **i18n languages** | 3 (en, ar, fr) |
|
||||||
|
|
||||||
|
### Competitor Parity Score
|
||||||
|
|
||||||
|
| Competitor | Their tools | We match | Coverage |
|
||||||
|
|---|---|---|---|
|
||||||
|
| iLovePDF | ~25 core | ~16 | 64% |
|
||||||
|
| SmallPDF | ~21 core | ~15 | 71% |
|
||||||
|
| TinyWow | ~50+ (many AI) | ~14 | 28% |
|
||||||
|
| PDF24 | ~30 core | ~17 | 57% |
|
||||||
|
|
||||||
|
### Recommended Next Sprint
|
||||||
|
|
||||||
|
**Highest ROI — 6 tools to reach 80%+ parity with SmallPDF/iLovePDF:**
|
||||||
|
|
||||||
|
1. Compress Image (Pillow — already installed)
|
||||||
|
2. PDF to Excel (`camelot-py`)
|
||||||
|
3. HTML to PDF (`weasyprint`)
|
||||||
|
4. Sign PDF (ReportLab overlay)
|
||||||
|
5. Reorder Pages (PyPDF2 — already installed)
|
||||||
|
6. PDF to PowerPoint (`python-pptx`)
|
||||||
@@ -38,6 +38,17 @@ const PdfFlowchart = lazy(() => import('@/components/tools/PdfFlowchart'));
|
|||||||
const ImageResize = lazy(() => import('@/components/tools/ImageResize'));
|
const ImageResize = lazy(() => import('@/components/tools/ImageResize'));
|
||||||
const OcrTool = lazy(() => import('@/components/tools/OcrTool'));
|
const OcrTool = lazy(() => import('@/components/tools/OcrTool'));
|
||||||
const RemoveBackground = lazy(() => import('@/components/tools/RemoveBackground'));
|
const RemoveBackground = lazy(() => import('@/components/tools/RemoveBackground'));
|
||||||
|
const CompressImage = lazy(() => import('@/components/tools/CompressImage'));
|
||||||
|
const PdfToExcel = lazy(() => import('@/components/tools/PdfToExcel'));
|
||||||
|
const RemoveWatermark = lazy(() => import('@/components/tools/RemoveWatermark'));
|
||||||
|
const ReorderPdf = lazy(() => import('@/components/tools/ReorderPdf'));
|
||||||
|
const ExtractPages = lazy(() => import('@/components/tools/ExtractPages'));
|
||||||
|
const QrCodeGenerator = lazy(() => import('@/components/tools/QrCodeGenerator'));
|
||||||
|
const HtmlToPdf = lazy(() => import('@/components/tools/HtmlToPdf'));
|
||||||
|
const ChatPdf = lazy(() => import('@/components/tools/ChatPdf'));
|
||||||
|
const SummarizePdf = lazy(() => import('@/components/tools/SummarizePdf'));
|
||||||
|
const TranslatePdf = lazy(() => import('@/components/tools/TranslatePdf'));
|
||||||
|
const TableExtractor = lazy(() => import('@/components/tools/TableExtractor'));
|
||||||
|
|
||||||
function LoadingFallback() {
|
function LoadingFallback() {
|
||||||
return (
|
return (
|
||||||
@@ -96,9 +107,28 @@ export default function App() {
|
|||||||
{/* Image Tools */}
|
{/* Image Tools */}
|
||||||
<Route path="/tools/image-converter" element={<ImageConverter />} />
|
<Route path="/tools/image-converter" element={<ImageConverter />} />
|
||||||
<Route path="/tools/image-resize" element={<ImageResize />} />
|
<Route path="/tools/image-resize" element={<ImageResize />} />
|
||||||
|
<Route path="/tools/compress-image" element={<CompressImage />} />
|
||||||
<Route path="/tools/ocr" element={<OcrTool />} />
|
<Route path="/tools/ocr" element={<OcrTool />} />
|
||||||
<Route path="/tools/remove-background" element={<RemoveBackground />} />
|
<Route path="/tools/remove-background" element={<RemoveBackground />} />
|
||||||
|
|
||||||
|
{/* Convert Tools */}
|
||||||
|
<Route path="/tools/pdf-to-excel" element={<PdfToExcel />} />
|
||||||
|
<Route path="/tools/html-to-pdf" element={<HtmlToPdf />} />
|
||||||
|
|
||||||
|
{/* PDF Extra Tools */}
|
||||||
|
<Route path="/tools/remove-watermark-pdf" element={<RemoveWatermark />} />
|
||||||
|
<Route path="/tools/reorder-pdf" element={<ReorderPdf />} />
|
||||||
|
<Route path="/tools/extract-pages" element={<ExtractPages />} />
|
||||||
|
|
||||||
|
{/* AI Tools */}
|
||||||
|
<Route path="/tools/chat-pdf" element={<ChatPdf />} />
|
||||||
|
<Route path="/tools/summarize-pdf" element={<SummarizePdf />} />
|
||||||
|
<Route path="/tools/translate-pdf" element={<TranslatePdf />} />
|
||||||
|
<Route path="/tools/extract-tables" element={<TableExtractor />} />
|
||||||
|
|
||||||
|
{/* Other Tools */}
|
||||||
|
<Route path="/tools/qr-code" element={<QrCodeGenerator />} />
|
||||||
|
|
||||||
{/* Video Tools */}
|
{/* Video Tools */}
|
||||||
<Route path="/tools/video-to-gif" element={<VideoToGif />} />
|
<Route path="/tools/video-to-gif" element={<VideoToGif />} />
|
||||||
|
|
||||||
|
|||||||
140
frontend/src/components/tools/ChatPdf.tsx
Normal file
140
frontend/src/components/tools/ChatPdf.tsx
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
import { useState, useEffect } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { MessageSquare } from 'lucide-react';
|
||||||
|
import FileUploader from '@/components/shared/FileUploader';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useFileUpload } from '@/hooks/useFileUpload';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
import { useFileStore } from '@/stores/fileStore';
|
||||||
|
|
||||||
|
export default function ChatPdf() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
|
||||||
|
const [question, setQuestion] = useState('');
|
||||||
|
const [reply, setReply] = useState('');
|
||||||
|
|
||||||
|
const {
|
||||||
|
file, uploadProgress, isUploading, taskId,
|
||||||
|
error: uploadError, selectFile, startUpload, reset,
|
||||||
|
} = useFileUpload({
|
||||||
|
endpoint: '/pdf-ai/chat',
|
||||||
|
maxSizeMB: 20,
|
||||||
|
acceptedTypes: ['pdf'],
|
||||||
|
extraData: { question },
|
||||||
|
});
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: (r) => {
|
||||||
|
setPhase('done');
|
||||||
|
setReply((r as Record<string, unknown>).reply as string || '');
|
||||||
|
},
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const storeFile = useFileStore((s) => s.file);
|
||||||
|
const clearStoreFile = useFileStore((s) => s.clearFile);
|
||||||
|
useEffect(() => {
|
||||||
|
if (storeFile) { selectFile(storeFile); clearStoreFile(); }
|
||||||
|
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
|
const handleUpload = async () => {
|
||||||
|
if (!question.trim()) return;
|
||||||
|
const id = await startUpload();
|
||||||
|
if (id) setPhase('processing');
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => { reset(); setPhase('upload'); setQuestion(''); setReply(''); };
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.chatPdf.title'),
|
||||||
|
description: t('tools.chatPdf.description'),
|
||||||
|
url: `${window.location.origin}/tools/chat-pdf`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.chatPdf.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.chatPdf.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/chat-pdf`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-blue-100 dark:bg-blue-900/30">
|
||||||
|
<MessageSquare className="h-8 w-8 text-blue-600 dark:text-blue-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.chatPdf.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.chatPdf.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'upload' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<FileUploader
|
||||||
|
onFileSelect={selectFile} file={file}
|
||||||
|
accept={{ 'application/pdf': ['.pdf'] }}
|
||||||
|
maxSizeMB={20} isUploading={isUploading}
|
||||||
|
uploadProgress={uploadProgress} error={uploadError}
|
||||||
|
onReset={handleReset} acceptLabel="PDF (.pdf)"
|
||||||
|
/>
|
||||||
|
{file && !isUploading && (
|
||||||
|
<>
|
||||||
|
<div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
|
||||||
|
<label className="mb-2 block text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||||
|
{t('tools.chatPdf.questionLabel')}
|
||||||
|
</label>
|
||||||
|
<textarea
|
||||||
|
value={question} onChange={(e) => setQuestion(e.target.value)}
|
||||||
|
placeholder={t('tools.chatPdf.questionPlaceholder')}
|
||||||
|
rows={3}
|
||||||
|
className="w-full rounded-lg border border-slate-300 px-3 py-2 text-sm dark:border-slate-600 dark:bg-slate-700 dark:text-slate-200"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleUpload} disabled={!question.trim()}
|
||||||
|
className="btn-primary w-full disabled:opacity-50 disabled:cursor-not-allowed">
|
||||||
|
{t('tools.chatPdf.shortDesc')}
|
||||||
|
</button>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && reply && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-2xl bg-white p-6 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
|
||||||
|
<h3 className="mb-3 text-sm font-semibold text-slate-700 dark:text-slate-300">
|
||||||
|
{t('tools.chatPdf.answer')}
|
||||||
|
</h3>
|
||||||
|
<div className="prose prose-sm dark:prose-invert max-w-none whitespace-pre-wrap text-slate-600 dark:text-slate-300">
|
||||||
|
{reply}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && taskError && !reply && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
125
frontend/src/components/tools/CompressImage.tsx
Normal file
125
frontend/src/components/tools/CompressImage.tsx
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
import { useState, useEffect } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { Minimize2 } from 'lucide-react';
|
||||||
|
import FileUploader from '@/components/shared/FileUploader';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import DownloadButton from '@/components/shared/DownloadButton';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useFileUpload } from '@/hooks/useFileUpload';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
import { useFileStore } from '@/stores/fileStore';
|
||||||
|
import { useConfig } from '@/hooks/useConfig';
|
||||||
|
|
||||||
|
export default function CompressImage() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const { limits } = useConfig();
|
||||||
|
const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
|
||||||
|
const [quality, setQuality] = useState(75);
|
||||||
|
|
||||||
|
const {
|
||||||
|
file, uploadProgress, isUploading, taskId,
|
||||||
|
error: uploadError, selectFile, startUpload, reset,
|
||||||
|
} = useFileUpload({
|
||||||
|
endpoint: '/image/compress',
|
||||||
|
maxSizeMB: limits.image,
|
||||||
|
acceptedTypes: ['png', 'jpg', 'jpeg', 'webp'],
|
||||||
|
extraData: { quality: quality.toString() },
|
||||||
|
});
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: () => setPhase('done'),
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const storeFile = useFileStore((s) => s.file);
|
||||||
|
const clearStoreFile = useFileStore((s) => s.clearFile);
|
||||||
|
useEffect(() => {
|
||||||
|
if (storeFile) { selectFile(storeFile); clearStoreFile(); }
|
||||||
|
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
|
const handleUpload = async () => {
|
||||||
|
const id = await startUpload();
|
||||||
|
if (id) setPhase('processing');
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => { reset(); setPhase('upload'); setQuality(75); };
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.compressImage.title'),
|
||||||
|
description: t('tools.compressImage.description'),
|
||||||
|
url: `${window.location.origin}/tools/compress-image`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.compressImage.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.compressImage.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/compress-image`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-orange-100 dark:bg-orange-900/30">
|
||||||
|
<Minimize2 className="h-8 w-8 text-orange-600 dark:text-orange-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.compressImage.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.compressImage.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'upload' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<FileUploader
|
||||||
|
onFileSelect={selectFile} file={file}
|
||||||
|
accept={{ 'image/png': ['.png'], 'image/jpeg': ['.jpg', '.jpeg'], 'image/webp': ['.webp'] }}
|
||||||
|
maxSizeMB={limits.image} isUploading={isUploading}
|
||||||
|
uploadProgress={uploadProgress} error={uploadError}
|
||||||
|
onReset={handleReset} acceptLabel="Images (PNG, JPG, WebP)"
|
||||||
|
/>
|
||||||
|
{file && !isUploading && (
|
||||||
|
<>
|
||||||
|
<div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
|
||||||
|
<label className="mb-2 flex items-center justify-between text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||||
|
<span>{t('tools.compressImage.quality')}</span>
|
||||||
|
<span className="text-primary-600">{quality}%</span>
|
||||||
|
</label>
|
||||||
|
<input type="range" min="10" max="100" value={quality}
|
||||||
|
onChange={(e) => setQuality(Number(e.target.value))}
|
||||||
|
className="w-full accent-primary-600" />
|
||||||
|
</div>
|
||||||
|
<button onClick={handleUpload} className="btn-primary w-full">
|
||||||
|
{t('tools.compressImage.shortDesc')}
|
||||||
|
</button>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && result && result.status === 'completed' && (
|
||||||
|
<DownloadButton result={result} onStartOver={handleReset} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && taskError && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
130
frontend/src/components/tools/ExtractPages.tsx
Normal file
130
frontend/src/components/tools/ExtractPages.tsx
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
import { useState, useEffect } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { FileOutput } from 'lucide-react';
|
||||||
|
import FileUploader from '@/components/shared/FileUploader';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import DownloadButton from '@/components/shared/DownloadButton';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useFileUpload } from '@/hooks/useFileUpload';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
import { useFileStore } from '@/stores/fileStore';
|
||||||
|
|
||||||
|
export default function ExtractPages() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
|
||||||
|
const [pages, setPages] = useState('');
|
||||||
|
|
||||||
|
const {
|
||||||
|
file, uploadProgress, isUploading, taskId,
|
||||||
|
error: uploadError, selectFile, startUpload, reset,
|
||||||
|
} = useFileUpload({
|
||||||
|
endpoint: '/pdf-tools/extract-pages',
|
||||||
|
maxSizeMB: 20,
|
||||||
|
acceptedTypes: ['pdf'],
|
||||||
|
extraData: { pages },
|
||||||
|
});
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: () => setPhase('done'),
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const storeFile = useFileStore((s) => s.file);
|
||||||
|
const clearStoreFile = useFileStore((s) => s.clearFile);
|
||||||
|
useEffect(() => {
|
||||||
|
if (storeFile) { selectFile(storeFile); clearStoreFile(); }
|
||||||
|
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
|
const handleUpload = async () => {
|
||||||
|
if (!pages.trim()) return;
|
||||||
|
const id = await startUpload();
|
||||||
|
if (id) setPhase('processing');
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => { reset(); setPhase('upload'); setPages(''); };
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.extractPages.title'),
|
||||||
|
description: t('tools.extractPages.description'),
|
||||||
|
url: `${window.location.origin}/tools/extract-pages`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.extractPages.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.extractPages.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/extract-pages`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-amber-100 dark:bg-amber-900/30">
|
||||||
|
<FileOutput className="h-8 w-8 text-amber-600 dark:text-amber-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.extractPages.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.extractPages.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'upload' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<FileUploader
|
||||||
|
onFileSelect={selectFile} file={file}
|
||||||
|
accept={{ 'application/pdf': ['.pdf'] }}
|
||||||
|
maxSizeMB={20} isUploading={isUploading}
|
||||||
|
uploadProgress={uploadProgress} error={uploadError}
|
||||||
|
onReset={handleReset} acceptLabel="PDF (.pdf)"
|
||||||
|
/>
|
||||||
|
{file && !isUploading && (
|
||||||
|
<>
|
||||||
|
<div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
|
||||||
|
<label className="mb-2 block text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||||
|
{t('tools.extractPages.pagesLabel')}
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text" value={pages}
|
||||||
|
onChange={(e) => setPages(e.target.value)}
|
||||||
|
placeholder={t('tools.extractPages.pagesPlaceholder')}
|
||||||
|
className="w-full rounded-lg border border-slate-300 px-3 py-2 text-sm dark:border-slate-600 dark:bg-slate-700 dark:text-slate-200"
|
||||||
|
/>
|
||||||
|
<p className="mt-2 text-xs text-slate-400 dark:text-slate-500">
|
||||||
|
{t('tools.extractPages.pagesHint')}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleUpload} disabled={!pages.trim()}
|
||||||
|
className="btn-primary w-full disabled:opacity-50 disabled:cursor-not-allowed">
|
||||||
|
{t('tools.extractPages.shortDesc')}
|
||||||
|
</button>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && result && result.status === 'completed' && (
|
||||||
|
<DownloadButton result={result} onStartOver={handleReset} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && taskError && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
103
frontend/src/components/tools/HtmlToPdf.tsx
Normal file
103
frontend/src/components/tools/HtmlToPdf.tsx
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
import { useState } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { Code } from 'lucide-react';
|
||||||
|
import FileUploader from '@/components/shared/FileUploader';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import DownloadButton from '@/components/shared/DownloadButton';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useFileUpload } from '@/hooks/useFileUpload';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
|
||||||
|
export default function HtmlToPdf() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
|
||||||
|
|
||||||
|
const {
|
||||||
|
file, uploadProgress, isUploading, taskId,
|
||||||
|
error: uploadError, selectFile, startUpload, reset,
|
||||||
|
} = useFileUpload({
|
||||||
|
endpoint: '/convert/html-to-pdf',
|
||||||
|
maxSizeMB: 10,
|
||||||
|
acceptedTypes: ['html', 'htm'],
|
||||||
|
});
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: () => setPhase('done'),
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const handleUpload = async () => {
|
||||||
|
const id = await startUpload();
|
||||||
|
if (id) setPhase('processing');
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => { reset(); setPhase('upload'); };
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.htmlToPdf.title'),
|
||||||
|
description: t('tools.htmlToPdf.description'),
|
||||||
|
url: `${window.location.origin}/tools/html-to-pdf`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.htmlToPdf.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.htmlToPdf.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/html-to-pdf`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-sky-100 dark:bg-sky-900/30">
|
||||||
|
<Code className="h-8 w-8 text-sky-600 dark:text-sky-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.htmlToPdf.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.htmlToPdf.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'upload' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<FileUploader
|
||||||
|
onFileSelect={selectFile} file={file}
|
||||||
|
accept={{ 'text/html': ['.html', '.htm'] }}
|
||||||
|
maxSizeMB={10} isUploading={isUploading}
|
||||||
|
uploadProgress={uploadProgress} error={uploadError}
|
||||||
|
onReset={handleReset} acceptLabel="HTML (.html, .htm)"
|
||||||
|
/>
|
||||||
|
{file && !isUploading && (
|
||||||
|
<button onClick={handleUpload} className="btn-primary w-full">
|
||||||
|
{t('tools.htmlToPdf.shortDesc')}
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && result && result.status === 'completed' && (
|
||||||
|
<DownloadButton result={result} onStartOver={handleReset} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && taskError && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
110
frontend/src/components/tools/PdfToExcel.tsx
Normal file
110
frontend/src/components/tools/PdfToExcel.tsx
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
import { useState, useEffect } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { Sheet } from 'lucide-react';
|
||||||
|
import FileUploader from '@/components/shared/FileUploader';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import DownloadButton from '@/components/shared/DownloadButton';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useFileUpload } from '@/hooks/useFileUpload';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
import { useFileStore } from '@/stores/fileStore';
|
||||||
|
|
||||||
|
export default function PdfToExcel() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
|
||||||
|
|
||||||
|
const {
|
||||||
|
file, uploadProgress, isUploading, taskId,
|
||||||
|
error: uploadError, selectFile, startUpload, reset,
|
||||||
|
} = useFileUpload({
|
||||||
|
endpoint: '/convert/pdf-to-excel',
|
||||||
|
maxSizeMB: 20,
|
||||||
|
acceptedTypes: ['pdf'],
|
||||||
|
});
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: () => setPhase('done'),
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const storeFile = useFileStore((s) => s.file);
|
||||||
|
const clearStoreFile = useFileStore((s) => s.clearFile);
|
||||||
|
useEffect(() => {
|
||||||
|
if (storeFile) { selectFile(storeFile); clearStoreFile(); }
|
||||||
|
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
|
const handleUpload = async () => {
|
||||||
|
const id = await startUpload();
|
||||||
|
if (id) setPhase('processing');
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => { reset(); setPhase('upload'); };
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.pdfToExcel.title'),
|
||||||
|
description: t('tools.pdfToExcel.description'),
|
||||||
|
url: `${window.location.origin}/tools/pdf-to-excel`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.pdfToExcel.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.pdfToExcel.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/pdf-to-excel`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-green-100 dark:bg-green-900/30">
|
||||||
|
<Sheet className="h-8 w-8 text-green-600 dark:text-green-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.pdfToExcel.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.pdfToExcel.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'upload' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<FileUploader
|
||||||
|
onFileSelect={selectFile} file={file}
|
||||||
|
accept={{ 'application/pdf': ['.pdf'] }}
|
||||||
|
maxSizeMB={20} isUploading={isUploading}
|
||||||
|
uploadProgress={uploadProgress} error={uploadError}
|
||||||
|
onReset={handleReset} acceptLabel="PDF (.pdf)"
|
||||||
|
/>
|
||||||
|
{file && !isUploading && (
|
||||||
|
<button onClick={handleUpload} className="btn-primary w-full">
|
||||||
|
{t('tools.pdfToExcel.shortDesc')}
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && result && result.status === 'completed' && (
|
||||||
|
<DownloadButton result={result} onStartOver={handleReset} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && taskError && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
135
frontend/src/components/tools/QrCodeGenerator.tsx
Normal file
135
frontend/src/components/tools/QrCodeGenerator.tsx
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
import { useState } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { QrCode } from 'lucide-react';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
import api, { type TaskResponse, type TaskResult } from '@/services/api';
|
||||||
|
|
||||||
|
export default function QrCodeGenerator() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const [phase, setPhase] = useState<'input' | 'processing' | 'done'>('input');
|
||||||
|
const [data, setData] = useState('');
|
||||||
|
const [size, setSize] = useState(300);
|
||||||
|
const [taskId, setTaskId] = useState<string | null>(null);
|
||||||
|
const [error, setError] = useState<string | null>(null);
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: () => setPhase('done'),
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const handleGenerate = async () => {
|
||||||
|
if (!data.trim()) return;
|
||||||
|
setError(null);
|
||||||
|
setPhase('processing');
|
||||||
|
try {
|
||||||
|
const res = await api.post<TaskResponse>('/qrcode/generate', { data: data.trim(), size });
|
||||||
|
setTaskId(res.data.task_id);
|
||||||
|
} catch (err) {
|
||||||
|
setError(err instanceof Error ? err.message : 'Failed to generate QR code.');
|
||||||
|
setPhase('done');
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => {
|
||||||
|
setPhase('input');
|
||||||
|
setData('');
|
||||||
|
setSize(300);
|
||||||
|
setTaskId(null);
|
||||||
|
setError(null);
|
||||||
|
};
|
||||||
|
|
||||||
|
const downloadUrl = result?.download_url ? `/api${result.download_url}` : null;
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.qrCode.title'),
|
||||||
|
description: t('tools.qrCode.description'),
|
||||||
|
url: `${window.location.origin}/tools/qr-code`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.qrCode.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.qrCode.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/qr-code`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-indigo-100 dark:bg-indigo-900/30">
|
||||||
|
<QrCode className="h-8 w-8 text-indigo-600 dark:text-indigo-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.qrCode.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.qrCode.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'input' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700 space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="mb-2 block text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||||
|
{t('tools.qrCode.dataLabel')}
|
||||||
|
</label>
|
||||||
|
<textarea
|
||||||
|
value={data} onChange={(e) => setData(e.target.value)}
|
||||||
|
placeholder={t('tools.qrCode.dataPlaceholder')}
|
||||||
|
rows={3}
|
||||||
|
className="w-full rounded-lg border border-slate-300 px-3 py-2 text-sm dark:border-slate-600 dark:bg-slate-700 dark:text-slate-200"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label className="mb-2 flex items-center justify-between text-sm font-medium text-slate-700 dark:text-slate-300">
|
||||||
|
<span>{t('tools.qrCode.sizeLabel')}</span>
|
||||||
|
<span className="text-primary-600">{size}px</span>
|
||||||
|
</label>
|
||||||
|
<input type="range" min="100" max="1000" step="50" value={size}
|
||||||
|
onChange={(e) => setSize(Number(e.target.value))}
|
||||||
|
className="w-full accent-primary-600" />
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleGenerate} disabled={!data.trim()}
|
||||||
|
className="btn-primary w-full disabled:opacity-50 disabled:cursor-not-allowed">
|
||||||
|
{t('tools.qrCode.shortDesc')}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && result && result.status === 'completed' && downloadUrl && (
|
||||||
|
<div className="space-y-6 text-center">
|
||||||
|
<div className="rounded-2xl bg-white p-8 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
|
||||||
|
<img src={downloadUrl} alt="QR Code" className="mx-auto max-w-[300px] rounded-lg" />
|
||||||
|
</div>
|
||||||
|
<div className="flex gap-3">
|
||||||
|
<a href={downloadUrl} download={result.filename || 'qrcode.png'}
|
||||||
|
className="btn-primary flex-1">{t('common.download')}</a>
|
||||||
|
<button onClick={handleReset} className="btn-secondary flex-1">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && (taskError || error) && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError || error}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
110
frontend/src/components/tools/RemoveWatermark.tsx
Normal file
110
frontend/src/components/tools/RemoveWatermark.tsx
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
import { useState, useEffect } from 'react';
|
||||||
|
import { useTranslation } from 'react-i18next';
|
||||||
|
import { Helmet } from 'react-helmet-async';
|
||||||
|
import { Droplets } from 'lucide-react';
|
||||||
|
import FileUploader from '@/components/shared/FileUploader';
|
||||||
|
import ProgressBar from '@/components/shared/ProgressBar';
|
||||||
|
import DownloadButton from '@/components/shared/DownloadButton';
|
||||||
|
import AdSlot from '@/components/layout/AdSlot';
|
||||||
|
import { useFileUpload } from '@/hooks/useFileUpload';
|
||||||
|
import { useTaskPolling } from '@/hooks/useTaskPolling';
|
||||||
|
import { generateToolSchema } from '@/utils/seo';
|
||||||
|
import { useFileStore } from '@/stores/fileStore';
|
||||||
|
|
||||||
|
export default function RemoveWatermark() {
|
||||||
|
const { t } = useTranslation();
|
||||||
|
const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
|
||||||
|
|
||||||
|
const {
|
||||||
|
file, uploadProgress, isUploading, taskId,
|
||||||
|
error: uploadError, selectFile, startUpload, reset,
|
||||||
|
} = useFileUpload({
|
||||||
|
endpoint: '/pdf-tools/remove-watermark',
|
||||||
|
maxSizeMB: 20,
|
||||||
|
acceptedTypes: ['pdf'],
|
||||||
|
});
|
||||||
|
|
||||||
|
const { status, result, error: taskError } = useTaskPolling({
|
||||||
|
taskId,
|
||||||
|
onComplete: () => setPhase('done'),
|
||||||
|
onError: () => setPhase('done'),
|
||||||
|
});
|
||||||
|
|
||||||
|
const storeFile = useFileStore((s) => s.file);
|
||||||
|
const clearStoreFile = useFileStore((s) => s.clearFile);
|
||||||
|
useEffect(() => {
|
||||||
|
if (storeFile) { selectFile(storeFile); clearStoreFile(); }
|
||||||
|
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||||
|
|
||||||
|
const handleUpload = async () => {
|
||||||
|
const id = await startUpload();
|
||||||
|
if (id) setPhase('processing');
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = () => { reset(); setPhase('upload'); };
|
||||||
|
|
||||||
|
const schema = generateToolSchema({
|
||||||
|
name: t('tools.removeWatermark.title'),
|
||||||
|
description: t('tools.removeWatermark.description'),
|
||||||
|
url: `${window.location.origin}/tools/remove-watermark-pdf`,
|
||||||
|
});
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Helmet>
|
||||||
|
<title>{t('tools.removeWatermark.title')} — {t('common.appName')}</title>
|
||||||
|
<meta name="description" content={t('tools.removeWatermark.description')} />
|
||||||
|
<link rel="canonical" href={`${window.location.origin}/tools/remove-watermark-pdf`} />
|
||||||
|
<script type="application/ld+json">{JSON.stringify(schema)}</script>
|
||||||
|
</Helmet>
|
||||||
|
|
||||||
|
<div className="mx-auto max-w-2xl">
|
||||||
|
<div className="mb-8 text-center">
|
||||||
|
<div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-rose-100 dark:bg-rose-900/30">
|
||||||
|
<Droplets className="h-8 w-8 text-rose-600 dark:text-rose-400" />
|
||||||
|
</div>
|
||||||
|
<h1 className="section-heading">{t('tools.removeWatermark.title')}</h1>
|
||||||
|
<p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.removeWatermark.description')}</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<AdSlot slot="top-banner" format="horizontal" className="mb-6" />
|
||||||
|
|
||||||
|
{phase === 'upload' && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<FileUploader
|
||||||
|
onFileSelect={selectFile} file={file}
|
||||||
|
accept={{ 'application/pdf': ['.pdf'] }}
|
||||||
|
maxSizeMB={20} isUploading={isUploading}
|
||||||
|
uploadProgress={uploadProgress} error={uploadError}
|
||||||
|
onReset={handleReset} acceptLabel="PDF (.pdf)"
|
||||||
|
/>
|
||||||
|
{file && !isUploading && (
|
||||||
|
<button onClick={handleUpload} className="btn-primary w-full">
|
||||||
|
{t('tools.removeWatermark.shortDesc')}
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'processing' && !result && (
|
||||||
|
<ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && result && result.status === 'completed' && (
|
||||||
|
<DownloadButton result={result} onStartOver={handleReset} />
|
||||||
|
)}
|
||||||
|
|
||||||
|
{phase === 'done' && taskError && (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
|
||||||
|
<p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
|
||||||
|
</div>
|
||||||
|
<button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<AdSlot slot="bottom-banner" className="mt-8" />
|
||||||
|
</div>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
130
frontend/src/components/tools/ReorderPdf.tsx
Normal file
130
frontend/src/components/tools/ReorderPdf.tsx
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
import { useState, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Helmet } from 'react-helmet-async';
import { ArrowUpDown } from 'lucide-react';
import FileUploader from '@/components/shared/FileUploader';
import ProgressBar from '@/components/shared/ProgressBar';
import DownloadButton from '@/components/shared/DownloadButton';
import AdSlot from '@/components/layout/AdSlot';
import { useFileUpload } from '@/hooks/useFileUpload';
import { useTaskPolling } from '@/hooks/useTaskPolling';
import { generateToolSchema } from '@/utils/seo';
import { useFileStore } from '@/stores/fileStore';

/**
 * "Reorder PDF pages" tool page.
 *
 * The user uploads a PDF together with a free-text page-order string
 * (sent to the backend as `page_order`); the backend task is polled and
 * the reordered document is offered for download on completion.
 */
export default function ReorderPdf() {
  const { t } = useTranslation();
  // Wizard phase: 'upload' -> 'processing' -> 'done'.
  const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
  // Raw page-order input, e.g. a comma-separated list of page numbers.
  // Format is interpreted by the backend — TODO confirm expected syntax.
  const [pageOrder, setPageOrder] = useState('');

  // Upload state + actions; pageOrder rides along as extra form data.
  const {
    file, uploadProgress, isUploading, taskId,
    error: uploadError, selectFile, startUpload, reset,
  } = useFileUpload({
    endpoint: '/pdf-tools/reorder',
    maxSizeMB: 20,
    acceptedTypes: ['pdf'],
    extraData: { page_order: pageOrder },
  });

  // Poll the backend task; either outcome ends the wizard.
  const { status, result, error: taskError } = useTaskPolling({
    taskId,
    onComplete: () => setPhase('done'),
    onError: () => setPhase('done'),
  });

  // Adopt a file handed off from another page via the shared store,
  // once on mount, then clear the store so it is not re-consumed.
  const storeFile = useFileStore((s) => s.file);
  const clearStoreFile = useFileStore((s) => s.clearFile);
  useEffect(() => {
    if (storeFile) { selectFile(storeFile); clearStoreFile(); }
  }, []); // eslint-disable-line react-hooks/exhaustive-deps

  // Require a non-blank page order before uploading; advance only if a
  // task id was returned.
  const handleUpload = async () => {
    if (!pageOrder.trim()) return;
    const id = await startUpload();
    if (id) setPhase('processing');
  };

  // Clear upload state, the wizard phase, and the page-order input.
  const handleReset = () => { reset(); setPhase('upload'); setPageOrder(''); };

  // JSON-LD structured data for SEO.
  const schema = generateToolSchema({
    name: t('tools.reorderPdf.title'),
    description: t('tools.reorderPdf.description'),
    url: `${window.location.origin}/tools/reorder-pdf`,
  });

  return (
    <>
      <Helmet>
        <title>{t('tools.reorderPdf.title')} — {t('common.appName')}</title>
        <meta name="description" content={t('tools.reorderPdf.description')} />
        <link rel="canonical" href={`${window.location.origin}/tools/reorder-pdf`} />
        <script type="application/ld+json">{JSON.stringify(schema)}</script>
      </Helmet>

      <div className="mx-auto max-w-2xl">
        {/* Page header */}
        <div className="mb-8 text-center">
          <div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-violet-100 dark:bg-violet-900/30">
            <ArrowUpDown className="h-8 w-8 text-violet-600 dark:text-violet-400" />
          </div>
          <h1 className="section-heading">{t('tools.reorderPdf.title')}</h1>
          <p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.reorderPdf.description')}</p>
        </div>

        <AdSlot slot="top-banner" format="horizontal" className="mb-6" />

        {/* Phase 1: file picker + page-order input */}
        {phase === 'upload' && (
          <div className="space-y-4">
            <FileUploader
              onFileSelect={selectFile} file={file}
              accept={{ 'application/pdf': ['.pdf'] }}
              maxSizeMB={20} isUploading={isUploading}
              uploadProgress={uploadProgress} error={uploadError}
              onReset={handleReset} acceptLabel="PDF (.pdf)"
            />
            {file && !isUploading && (
              <>
                <div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
                  <label className="mb-2 block text-sm font-medium text-slate-700 dark:text-slate-300">
                    {t('tools.reorderPdf.orderLabel')}
                  </label>
                  <input
                    type="text" value={pageOrder}
                    onChange={(e) => setPageOrder(e.target.value)}
                    placeholder={t('tools.reorderPdf.orderPlaceholder')}
                    className="w-full rounded-lg border border-slate-300 px-3 py-2 text-sm dark:border-slate-600 dark:bg-slate-700 dark:text-slate-200"
                  />
                  <p className="mt-2 text-xs text-slate-400 dark:text-slate-500">
                    {t('tools.reorderPdf.orderHint')}
                  </p>
                </div>
                <button onClick={handleUpload} disabled={!pageOrder.trim()}
                  className="btn-primary w-full disabled:opacity-50 disabled:cursor-not-allowed">
                  {t('tools.reorderPdf.shortDesc')}
                </button>
              </>
            )}
          </div>
        )}

        {/* Phase 2: task in flight */}
        {phase === 'processing' && !result && (
          <ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
        )}

        {/* Phase 3a: success */}
        {phase === 'done' && result && result.status === 'completed' && (
          <DownloadButton result={result} onStartOver={handleReset} />
        )}

        {/* Phase 3b: failure */}
        {phase === 'done' && taskError && (
          <div className="space-y-4">
            <div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
              <p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        <AdSlot slot="bottom-banner" className="mt-8" />
      </div>
    </>
  );
}
|
||||||
144
frontend/src/components/tools/SummarizePdf.tsx
Normal file
144
frontend/src/components/tools/SummarizePdf.tsx
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
import { useState, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Helmet } from 'react-helmet-async';
import { FileText } from 'lucide-react';
import FileUploader from '@/components/shared/FileUploader';
import ProgressBar from '@/components/shared/ProgressBar';
import AdSlot from '@/components/layout/AdSlot';
import { useFileUpload } from '@/hooks/useFileUpload';
import { useTaskPolling } from '@/hooks/useTaskPolling';
import { generateToolSchema } from '@/utils/seo';
import { useFileStore } from '@/stores/fileStore';

/**
 * "Summarize PDF" tool page (AI-backed).
 *
 * The user uploads a PDF and chooses a summary length; the backend
 * `/pdf-ai/summarize` task is polled and the plain-text summary from the
 * task result is rendered inline (no file download).
 */
export default function SummarizePdf() {
  const { t } = useTranslation();
  // Wizard phase: 'upload' -> 'processing' -> 'done'.
  const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
  // Requested summary length: 'short' | 'medium' | 'long'.
  const [length, setLength] = useState('medium');
  // Summary text extracted from the completed task result.
  const [summary, setSummary] = useState('');

  // Upload state + actions; the chosen length rides along as form data.
  const {
    file, uploadProgress, isUploading, taskId,
    error: uploadError, selectFile, startUpload, reset,
  } = useFileUpload({
    endpoint: '/pdf-ai/summarize',
    maxSizeMB: 20,
    acceptedTypes: ['pdf'],
    extraData: { length },
  });

  // Poll the task; on completion pull `summary` out of the loosely-typed
  // result payload (falls back to '' when absent).
  const { status, result, error: taskError } = useTaskPolling({
    taskId,
    onComplete: (r) => {
      setPhase('done');
      setSummary((r as Record<string, unknown>).summary as string || '');
    },
    onError: () => setPhase('done'),
  });

  // Adopt a file handed off from another page via the shared store,
  // once on mount, then clear the store so it is not re-consumed.
  const storeFile = useFileStore((s) => s.file);
  const clearStoreFile = useFileStore((s) => s.clearFile);
  useEffect(() => {
    if (storeFile) { selectFile(storeFile); clearStoreFile(); }
  }, []); // eslint-disable-line react-hooks/exhaustive-deps

  // Start the upload; advance only if a task id was returned.
  const handleUpload = async () => {
    const id = await startUpload();
    if (id) setPhase('processing');
  };

  // Clear all state, including the length choice and the shown summary.
  const handleReset = () => { reset(); setPhase('upload'); setLength('medium'); setSummary(''); };

  // JSON-LD structured data for SEO.
  const schema = generateToolSchema({
    name: t('tools.summarizePdf.title'),
    description: t('tools.summarizePdf.description'),
    url: `${window.location.origin}/tools/summarize-pdf`,
  });

  return (
    <>
      <Helmet>
        <title>{t('tools.summarizePdf.title')} — {t('common.appName')}</title>
        <meta name="description" content={t('tools.summarizePdf.description')} />
        <link rel="canonical" href={`${window.location.origin}/tools/summarize-pdf`} />
        <script type="application/ld+json">{JSON.stringify(schema)}</script>
      </Helmet>

      <div className="mx-auto max-w-2xl">
        {/* Page header */}
        <div className="mb-8 text-center">
          <div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-emerald-100 dark:bg-emerald-900/30">
            <FileText className="h-8 w-8 text-emerald-600 dark:text-emerald-400" />
          </div>
          <h1 className="section-heading">{t('tools.summarizePdf.title')}</h1>
          <p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.summarizePdf.description')}</p>
        </div>

        <AdSlot slot="top-banner" format="horizontal" className="mb-6" />

        {/* Phase 1: file picker + length selector */}
        {phase === 'upload' && (
          <div className="space-y-4">
            <FileUploader
              onFileSelect={selectFile} file={file}
              accept={{ 'application/pdf': ['.pdf'] }}
              maxSizeMB={20} isUploading={isUploading}
              uploadProgress={uploadProgress} error={uploadError}
              onReset={handleReset} acceptLabel="PDF (.pdf)"
            />
            {file && !isUploading && (
              <>
                <div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
                  <label className="mb-2 block text-sm font-medium text-slate-700 dark:text-slate-300">
                    {t('tools.summarizePdf.lengthLabel')}
                  </label>
                  {/* Three mutually-exclusive length buttons */}
                  <div className="grid grid-cols-3 gap-2">
                    {(['short', 'medium', 'long'] as const).map((opt) => (
                      <button key={opt} onClick={() => setLength(opt)}
                        className={`rounded-lg px-3 py-2 text-sm font-medium transition-colors ${
                          length === opt
                            ? 'bg-emerald-600 text-white'
                            : 'bg-slate-100 text-slate-600 hover:bg-slate-200 dark:bg-slate-700 dark:text-slate-300'
                        }`}>
                        {t(`tools.summarizePdf.${opt}`)}
                      </button>
                    ))}
                  </div>
                </div>
                <button onClick={handleUpload} className="btn-primary w-full">
                  {t('tools.summarizePdf.shortDesc')}
                </button>
              </>
            )}
          </div>
        )}

        {/* Phase 2: task in flight */}
        {phase === 'processing' && !result && (
          <ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
        )}

        {/* Phase 3a: summary produced — render inline, preserving newlines */}
        {phase === 'done' && summary && (
          <div className="space-y-4">
            <div className="rounded-2xl bg-white p-6 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
              <h3 className="mb-3 text-sm font-semibold text-slate-700 dark:text-slate-300">
                {t('tools.summarizePdf.resultTitle')}
              </h3>
              <div className="prose prose-sm dark:prose-invert max-w-none whitespace-pre-wrap text-slate-600 dark:text-slate-300">
                {summary}
              </div>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        {/* Phase 3b: failure (only when no summary was produced) */}
        {phase === 'done' && taskError && !summary && (
          <div className="space-y-4">
            <div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
              <p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        <AdSlot slot="bottom-banner" className="mt-8" />
      </div>
    </>
  );
}
|
||||||
166
frontend/src/components/tools/TableExtractor.tsx
Normal file
166
frontend/src/components/tools/TableExtractor.tsx
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
import { useState, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Helmet } from 'react-helmet-async';
import { Table } from 'lucide-react';
import FileUploader from '@/components/shared/FileUploader';
import ProgressBar from '@/components/shared/ProgressBar';
import AdSlot from '@/components/layout/AdSlot';
import { useFileUpload } from '@/hooks/useFileUpload';
import { useTaskPolling } from '@/hooks/useTaskPolling';
import { generateToolSchema } from '@/utils/seo';
import { useFileStore } from '@/stores/fileStore';

/** One table extracted from the PDF, as delivered in the task result. */
interface ExtractedTable {
  // 1-based(?) page number the table was found on — TODO confirm base.
  page: number;
  // Zero-based index of the table on its page (display adds 1).
  table_index: number;
  // Header row cells; may be empty, in which case no <thead> is drawn.
  headers: string[];
  // Body rows, each a list of cell strings.
  rows: string[][];
}

/**
 * "Extract tables from PDF" tool page.
 *
 * Uploads a PDF to `/pdf-ai/extract-tables`, polls the task, then renders
 * each extracted table as an HTML table inline (no file download).
 */
export default function TableExtractor() {
  const { t } = useTranslation();
  // Wizard phase: 'upload' -> 'processing' -> 'done'.
  const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
  // Tables pulled from the completed task result.
  const [tables, setTables] = useState<ExtractedTable[]>([]);

  // Upload state + actions for the table-extraction endpoint.
  const {
    file, uploadProgress, isUploading, taskId,
    error: uploadError, selectFile, startUpload, reset,
  } = useFileUpload({
    endpoint: '/pdf-ai/extract-tables',
    maxSizeMB: 20,
    acceptedTypes: ['pdf'],
  });

  // Poll the task; on completion accept `tables` from the loosely-typed
  // result payload only if it is actually an array.
  const { status, result, error: taskError } = useTaskPolling({
    taskId,
    onComplete: (r) => {
      setPhase('done');
      const raw = (r as Record<string, unknown>).tables;
      if (Array.isArray(raw)) setTables(raw as ExtractedTable[]);
    },
    onError: () => setPhase('done'),
  });

  // Adopt a file handed off from another page via the shared store,
  // once on mount, then clear the store so it is not re-consumed.
  const storeFile = useFileStore((s) => s.file);
  const clearStoreFile = useFileStore((s) => s.clearFile);
  useEffect(() => {
    if (storeFile) { selectFile(storeFile); clearStoreFile(); }
  }, []); // eslint-disable-line react-hooks/exhaustive-deps

  // Start the upload; advance only if a task id was returned.
  const handleUpload = async () => {
    const id = await startUpload();
    if (id) setPhase('processing');
  };

  // Clear upload state, the wizard phase, and any extracted tables.
  const handleReset = () => { reset(); setPhase('upload'); setTables([]); };

  // JSON-LD structured data for SEO.
  const schema = generateToolSchema({
    name: t('tools.tableExtractor.title'),
    description: t('tools.tableExtractor.description'),
    url: `${window.location.origin}/tools/extract-tables`,
  });

  return (
    <>
      <Helmet>
        <title>{t('tools.tableExtractor.title')} — {t('common.appName')}</title>
        <meta name="description" content={t('tools.tableExtractor.description')} />
        <link rel="canonical" href={`${window.location.origin}/tools/extract-tables`} />
        <script type="application/ld+json">{JSON.stringify(schema)}</script>
      </Helmet>

      <div className="mx-auto max-w-3xl">
        {/* Page header */}
        <div className="mb-8 text-center">
          <div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-teal-100 dark:bg-teal-900/30">
            <Table className="h-8 w-8 text-teal-600 dark:text-teal-400" />
          </div>
          <h1 className="section-heading">{t('tools.tableExtractor.title')}</h1>
          <p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.tableExtractor.description')}</p>
        </div>

        <AdSlot slot="top-banner" format="horizontal" className="mb-6" />

        {/* Phase 1: file picker */}
        {phase === 'upload' && (
          <div className="space-y-4">
            <FileUploader
              onFileSelect={selectFile} file={file}
              accept={{ 'application/pdf': ['.pdf'] }}
              maxSizeMB={20} isUploading={isUploading}
              uploadProgress={uploadProgress} error={uploadError}
              onReset={handleReset} acceptLabel="PDF (.pdf)"
            />
            {file && !isUploading && (
              <button onClick={handleUpload} className="btn-primary w-full">
                {t('tools.tableExtractor.shortDesc')}
              </button>
            )}
          </div>
        )}

        {/* Phase 2: task in flight */}
        {phase === 'processing' && !result && (
          <ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
        )}

        {/* Phase 3a: tables found — one card per extracted table */}
        {phase === 'done' && tables.length > 0 && (
          <div className="space-y-6">
            <p className="text-center text-sm text-slate-500 dark:text-slate-400">
              {t('tools.tableExtractor.tablesFound', { count: tables.length })}
            </p>
            {tables.map((tbl, idx) => (
              <div key={idx} className="overflow-hidden rounded-2xl bg-white ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
                {/* Card header: page number and 1-based table index */}
                <div className="border-b border-slate-200 px-4 py-2 dark:border-slate-700">
                  <span className="text-xs font-semibold text-slate-500 dark:text-slate-400">
                    {t('tools.tableExtractor.tablePage', { page: tbl.page, index: tbl.table_index + 1 })}
                  </span>
                </div>
                <div className="overflow-x-auto">
                  <table className="min-w-full text-sm">
                    {/* Header row only when the extractor supplied one */}
                    {tbl.headers.length > 0 && (
                      <thead className="bg-slate-50 dark:bg-slate-700">
                        <tr>
                          {tbl.headers.map((h, hi) => (
                            <th key={hi} className="px-3 py-2 text-left font-medium text-slate-700 dark:text-slate-300">{h}</th>
                          ))}
                        </tr>
                      </thead>
                    )}
                    <tbody>
                      {tbl.rows.map((row, ri) => (
                        <tr key={ri} className="border-t border-slate-100 dark:border-slate-700">
                          {row.map((cell, ci) => (
                            <td key={ci} className="px-3 py-2 text-slate-600 dark:text-slate-300">{cell}</td>
                          ))}
                        </tr>
                      ))}
                    </tbody>
                  </table>
                </div>
              </div>
            ))}
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        {/* Phase 3b: task completed but produced no tables */}
        {phase === 'done' && result && tables.length === 0 && !taskError && (
          <div className="space-y-4">
            <div className="rounded-xl bg-amber-50 p-4 ring-1 ring-amber-200 dark:bg-amber-900/20 dark:ring-amber-800">
              <p className="text-sm text-amber-700 dark:text-amber-400">{t('tools.tableExtractor.noTables')}</p>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        {/* Phase 3c: failure */}
        {phase === 'done' && taskError && (
          <div className="space-y-4">
            <div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
              <p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        <AdSlot slot="bottom-banner" className="mt-8" />
      </div>
    </>
  );
}
|
||||||
153
frontend/src/components/tools/TranslatePdf.tsx
Normal file
153
frontend/src/components/tools/TranslatePdf.tsx
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
import { useState, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { Helmet } from 'react-helmet-async';
import { Languages } from 'lucide-react';
import FileUploader from '@/components/shared/FileUploader';
import ProgressBar from '@/components/shared/ProgressBar';
import AdSlot from '@/components/layout/AdSlot';
import { useFileUpload } from '@/hooks/useFileUpload';
import { useTaskPolling } from '@/hooks/useTaskPolling';
import { generateToolSchema } from '@/utils/seo';
import { useFileStore } from '@/stores/fileStore';

// Target languages offered for AI translation. `value` is the ISO-639-1 code
// sent to the backend as `target_language`; `label` is shown in the native script.
const LANGUAGES = [
  { value: 'en', label: 'English' },
  { value: 'ar', label: 'العربية' },
  { value: 'fr', label: 'Français' },
  { value: 'es', label: 'Español' },
  { value: 'de', label: 'Deutsch' },
  { value: 'zh', label: '中文' },
  { value: 'ja', label: '日本語' },
  { value: 'ko', label: '한국어' },
  { value: 'pt', label: 'Português' },
  { value: 'ru', label: 'Русский' },
  { value: 'tr', label: 'Türkçe' },
  { value: 'it', label: 'Italiano' },
];

/**
 * "Translate PDF" tool page.
 *
 * Flow: upload phase (pick a PDF + target language) -> processing phase
 * (poll the async task) -> done phase (show the translated text, a
 * no-result fallback is not rendered here — only success or error panels).
 * The component also renders SEO tags (Helmet + JSON-LD schema) and ad slots.
 */
export default function TranslatePdf() {
  const { t } = useTranslation();
  // UI phase machine; 'done' covers both success and task failure.
  const [phase, setPhase] = useState<'upload' | 'processing' | 'done'>('upload');
  const [targetLang, setTargetLang] = useState('en');
  const [translation, setTranslation] = useState('');

  // Upload handling: posts the file to the AI-translate endpoint and yields a
  // task id for polling. `extraData` carries the selected target language.
  const {
    file, uploadProgress, isUploading, taskId,
    error: uploadError, selectFile, startUpload, reset,
  } = useFileUpload({
    endpoint: '/pdf-ai/translate',
    maxSizeMB: 20,
    acceptedTypes: ['pdf'],
    extraData: { target_language: targetLang },
  });

  // Poll the backend task until it completes or fails; on completion extract
  // the `translation` field from the (loosely typed) result payload.
  const { status, result, error: taskError } = useTaskPolling({
    taskId,
    onComplete: (r) => {
      setPhase('done');
      setTranslation((r as Record<string, unknown>).translation as string || '');
    },
    onError: () => setPhase('done'),
  });

  // If a file was staged globally (e.g. from the home-page hero upload zone),
  // adopt it once on mount and clear the store so it is not reused elsewhere.
  const storeFile = useFileStore((s) => s.file);
  const clearStoreFile = useFileStore((s) => s.clearFile);
  useEffect(() => {
    if (storeFile) { selectFile(storeFile); clearStoreFile(); }
  }, []); // eslint-disable-line react-hooks/exhaustive-deps

  // Kick off the upload; only advance to the processing phase if a task id
  // was actually created (startUpload returns falsy on failure).
  const handleUpload = async () => {
    const id = await startUpload();
    if (id) setPhase('processing');
  };

  // Full reset back to the initial upload state, including the language choice.
  const handleReset = () => { reset(); setPhase('upload'); setTargetLang('en'); setTranslation(''); };

  // Structured-data (JSON-LD) description of this tool for search engines.
  const schema = generateToolSchema({
    name: t('tools.translatePdf.title'),
    description: t('tools.translatePdf.description'),
    url: `${window.location.origin}/tools/translate-pdf`,
  });

  return (
    <>
      <Helmet>
        <title>{t('tools.translatePdf.title')} — {t('common.appName')}</title>
        <meta name="description" content={t('tools.translatePdf.description')} />
        <link rel="canonical" href={`${window.location.origin}/tools/translate-pdf`} />
        <script type="application/ld+json">{JSON.stringify(schema)}</script>
      </Helmet>

      <div className="mx-auto max-w-2xl">
        {/* Page header: icon, title, short description */}
        <div className="mb-8 text-center">
          <div className="mx-auto mb-4 flex h-16 w-16 items-center justify-center rounded-2xl bg-purple-100 dark:bg-purple-900/30">
            <Languages className="h-8 w-8 text-purple-600 dark:text-purple-400" />
          </div>
          <h1 className="section-heading">{t('tools.translatePdf.title')}</h1>
          <p className="mt-2 text-slate-500 dark:text-slate-400">{t('tools.translatePdf.description')}</p>
        </div>

        <AdSlot slot="top-banner" format="horizontal" className="mb-6" />

        {/* Upload phase: file picker plus target-language selector */}
        {phase === 'upload' && (
          <div className="space-y-4">
            <FileUploader
              onFileSelect={selectFile} file={file}
              accept={{ 'application/pdf': ['.pdf'] }}
              maxSizeMB={20} isUploading={isUploading}
              uploadProgress={uploadProgress} error={uploadError}
              onReset={handleReset} acceptLabel="PDF (.pdf)"
            />
            {file && !isUploading && (
              <>
                <div className="rounded-2xl bg-white p-5 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
                  <label className="mb-2 block text-sm font-medium text-slate-700 dark:text-slate-300">
                    {t('tools.translatePdf.targetLang')}
                  </label>
                  <select value={targetLang} onChange={(e) => setTargetLang(e.target.value)}
                    className="w-full rounded-lg border border-slate-300 px-3 py-2 text-sm dark:border-slate-600 dark:bg-slate-700 dark:text-slate-200">
                    {LANGUAGES.map((lang) => (
                      <option key={lang.value} value={lang.value}>{lang.label}</option>
                    ))}
                  </select>
                </div>
                <button onClick={handleUpload} className="btn-primary w-full">
                  {t('tools.translatePdf.shortDesc')}
                </button>
              </>
            )}
          </div>
        )}

        {/* Processing phase: show task state until a result arrives */}
        {phase === 'processing' && !result && (
          <ProgressBar state={status?.state || 'PENDING'} message={status?.progress} />
        )}

        {/* Done (success): render the translated text preserving line breaks */}
        {phase === 'done' && translation && (
          <div className="space-y-4">
            <div className="rounded-2xl bg-white p-6 ring-1 ring-slate-200 dark:bg-slate-800 dark:ring-slate-700">
              <h3 className="mb-3 text-sm font-semibold text-slate-700 dark:text-slate-300">
                {t('tools.translatePdf.resultTitle')}
              </h3>
              <div className="prose prose-sm dark:prose-invert max-w-none whitespace-pre-wrap text-slate-600 dark:text-slate-300">
                {translation}
              </div>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        {/* Done (failure): show the task error with a restart option */}
        {phase === 'done' && taskError && !translation && (
          <div className="space-y-4">
            <div className="rounded-xl bg-red-50 p-4 ring-1 ring-red-200 dark:bg-red-900/20 dark:ring-red-800">
              <p className="text-sm text-red-700 dark:text-red-400">{taskError}</p>
            </div>
            <button onClick={handleReset} className="btn-secondary w-full">{t('common.startOver')}</button>
          </div>
        )}

        <AdSlot slot="bottom-banner" className="mt-8" />
      </div>
    </>
  );
}
|
||||||
@@ -383,6 +383,84 @@
|
|||||||
"chatSuggestion3": "اقترح عناوين أفضل",
|
"chatSuggestion3": "اقترح عناوين أفضل",
|
||||||
"chatSuggestion4": "أضف معالجة الأخطاء",
|
"chatSuggestion4": "أضف معالجة الأخطاء",
|
||||||
"sendMessage": "إرسال"
|
"sendMessage": "إرسال"
|
||||||
|
},
|
||||||
|
"compressImage": {
|
||||||
|
"title": "ضغط الصورة",
|
||||||
|
"description": "قلّل حجم الصورة مع الحفاظ على الجودة. يدعم PNG و JPG و WebP.",
|
||||||
|
"shortDesc": "ضغط الصورة",
|
||||||
|
"quality": "الجودة"
|
||||||
|
},
|
||||||
|
"pdfToExcel": {
|
||||||
|
"title": "PDF إلى Excel",
|
||||||
|
"description": "استخرج الجداول من ملفات PDF وحوّلها إلى جداول بيانات Excel.",
|
||||||
|
"shortDesc": "PDF → Excel"
|
||||||
|
},
|
||||||
|
"removeWatermark": {
|
||||||
|
"title": "إزالة العلامة المائية",
|
||||||
|
"description": "أزل العلامات المائية النصية من ملفات PDF تلقائياً.",
|
||||||
|
"shortDesc": "إزالة العلامة المائية"
|
||||||
|
},
|
||||||
|
"reorderPdf": {
|
||||||
|
"title": "إعادة ترتيب صفحات PDF",
|
||||||
|
"description": "أعد ترتيب صفحات PDF بأي ترتيب تريده.",
|
||||||
|
"shortDesc": "إعادة الترتيب",
|
||||||
|
"orderLabel": "ترتيب الصفحات الجديد",
|
||||||
|
"orderPlaceholder": "مثال: 3,1,2,5,4",
|
||||||
|
"orderHint": "أدخل أرقام الصفحات مفصولة بفواصل بالترتيب المطلوب."
|
||||||
|
},
|
||||||
|
"extractPages": {
|
||||||
|
"title": "استخراج صفحات PDF",
|
||||||
|
"description": "استخرج صفحات محددة من PDF إلى مستند جديد.",
|
||||||
|
"shortDesc": "استخراج الصفحات",
|
||||||
|
"pagesLabel": "الصفحات المطلوبة",
|
||||||
|
"pagesPlaceholder": "مثال: 1,3,5-8",
|
||||||
|
"pagesHint": "أدخل أرقام الصفحات أو نطاقات مفصولة بفواصل."
|
||||||
|
},
|
||||||
|
"qrCode": {
|
||||||
|
"title": "مولّد رمز QR",
|
||||||
|
"description": "أنشئ رموز QR من نصوص أو روابط أو أي بيانات. خصّص الحجم وحمّل بصيغة PNG.",
|
||||||
|
"shortDesc": "إنشاء رمز QR",
|
||||||
|
"dataLabel": "نص أو رابط",
|
||||||
|
"dataPlaceholder": "أدخل نصاً أو رابطاً أو أي بيانات...",
|
||||||
|
"sizeLabel": "الحجم"
|
||||||
|
},
|
||||||
|
"htmlToPdf": {
|
||||||
|
"title": "HTML إلى PDF",
|
||||||
|
"description": "حوّل ملفات HTML إلى مستندات PDF مع دعم كامل للتنسيق.",
|
||||||
|
"shortDesc": "HTML → PDF"
|
||||||
|
},
|
||||||
|
"chatPdf": {
|
||||||
|
"title": "محادثة مع PDF",
|
||||||
|
"description": "اطرح أسئلة حول مستند PDF واحصل على إجابات بالذكاء الاصطناعي.",
|
||||||
|
"shortDesc": "اسأل الذكاء الاصطناعي",
|
||||||
|
"questionLabel": "سؤالك",
|
||||||
|
"questionPlaceholder": "ماذا تريد أن تعرف عن هذا المستند؟",
|
||||||
|
"answer": "إجابة الذكاء الاصطناعي"
|
||||||
|
},
|
||||||
|
"summarizePdf": {
|
||||||
|
"title": "تلخيص PDF",
|
||||||
|
"description": "احصل على ملخص مولّد بالذكاء الاصطناعي لمستند PDF في ثوانٍ.",
|
||||||
|
"shortDesc": "تلخيص PDF",
|
||||||
|
"lengthLabel": "طول الملخص",
|
||||||
|
"short": "قصير",
|
||||||
|
"medium": "متوسط",
|
||||||
|
"long": "مفصّل",
|
||||||
|
"resultTitle": "الملخص"
|
||||||
|
},
|
||||||
|
"translatePdf": {
|
||||||
|
"title": "ترجمة PDF",
|
||||||
|
"description": "ترجم محتوى مستند PDF إلى أي لغة باستخدام الذكاء الاصطناعي.",
|
||||||
|
"shortDesc": "ترجمة PDF",
|
||||||
|
"targetLang": "اللغة المستهدفة",
|
||||||
|
"resultTitle": "الترجمة"
|
||||||
|
},
|
||||||
|
"tableExtractor": {
|
||||||
|
"title": "استخراج الجداول من PDF",
|
||||||
|
"description": "اكتشف واستخرج الجداول من مستندات PDF إلى بيانات منظمة.",
|
||||||
|
"shortDesc": "استخراج الجداول",
|
||||||
|
"tablesFound": "تم العثور على {{count}} جدول(جداول)",
|
||||||
|
"tablePage": "الصفحة {{page}} — الجدول {{index}}",
|
||||||
|
"noTables": "لم يتم العثور على جداول في هذا المستند."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"account": {
|
"account": {
|
||||||
|
|||||||
@@ -383,6 +383,84 @@
|
|||||||
"chatSuggestion3": "Suggest better titles",
|
"chatSuggestion3": "Suggest better titles",
|
||||||
"chatSuggestion4": "Add error handling",
|
"chatSuggestion4": "Add error handling",
|
||||||
"sendMessage": "Send"
|
"sendMessage": "Send"
|
||||||
|
},
|
||||||
|
"compressImage": {
|
||||||
|
"title": "Compress Image",
|
||||||
|
"description": "Reduce image file size while maintaining quality. Supports PNG, JPG, and WebP.",
|
||||||
|
"shortDesc": "Compress Image",
|
||||||
|
"quality": "Quality"
|
||||||
|
},
|
||||||
|
"pdfToExcel": {
|
||||||
|
"title": "PDF to Excel",
|
||||||
|
"description": "Extract tables from PDF files and convert them to Excel spreadsheets.",
|
||||||
|
"shortDesc": "PDF → Excel"
|
||||||
|
},
|
||||||
|
"removeWatermark": {
|
||||||
|
"title": "Remove Watermark",
|
||||||
|
"description": "Remove text watermarks from PDF files automatically.",
|
||||||
|
"shortDesc": "Remove Watermark"
|
||||||
|
},
|
||||||
|
"reorderPdf": {
|
||||||
|
"title": "Reorder PDF Pages",
|
||||||
|
"description": "Rearrange the pages of your PDF in any order you want.",
|
||||||
|
"shortDesc": "Reorder Pages",
|
||||||
|
"orderLabel": "New Page Order",
|
||||||
|
"orderPlaceholder": "e.g. 3,1,2,5,4",
|
||||||
|
"orderHint": "Enter page numbers separated by commas in the desired order."
|
||||||
|
},
|
||||||
|
"extractPages": {
|
||||||
|
"title": "Extract PDF Pages",
|
||||||
|
"description": "Extract specific pages from a PDF into a new document.",
|
||||||
|
"shortDesc": "Extract Pages",
|
||||||
|
"pagesLabel": "Pages to Extract",
|
||||||
|
"pagesPlaceholder": "e.g. 1,3,5-8",
|
||||||
|
"pagesHint": "Enter page numbers or ranges separated by commas."
|
||||||
|
},
|
||||||
|
"qrCode": {
|
||||||
|
"title": "QR Code Generator",
|
||||||
|
"description": "Generate QR codes from text, URLs, or any data. Customize size and download as PNG.",
|
||||||
|
"shortDesc": "Generate QR Code",
|
||||||
|
"dataLabel": "Text or URL",
|
||||||
|
"dataPlaceholder": "Enter text, URL, or any data...",
|
||||||
|
"sizeLabel": "Size"
|
||||||
|
},
|
||||||
|
"htmlToPdf": {
|
||||||
|
"title": "HTML to PDF",
|
||||||
|
"description": "Convert HTML files to PDF documents with full styling support.",
|
||||||
|
"shortDesc": "HTML → PDF"
|
||||||
|
},
|
||||||
|
"chatPdf": {
|
||||||
|
"title": "Chat with PDF",
|
||||||
|
"description": "Ask questions about your PDF document and get AI-powered answers.",
|
||||||
|
"shortDesc": "Ask AI",
|
||||||
|
"questionLabel": "Your Question",
|
||||||
|
"questionPlaceholder": "What would you like to know about this document?",
|
||||||
|
"answer": "AI Answer"
|
||||||
|
},
|
||||||
|
"summarizePdf": {
|
||||||
|
"title": "Summarize PDF",
|
||||||
|
"description": "Get an AI-generated summary of your PDF document in seconds.",
|
||||||
|
"shortDesc": "Summarize PDF",
|
||||||
|
"lengthLabel": "Summary Length",
|
||||||
|
"short": "Short",
|
||||||
|
"medium": "Medium",
|
||||||
|
"long": "Detailed",
|
||||||
|
"resultTitle": "Summary"
|
||||||
|
},
|
||||||
|
"translatePdf": {
|
||||||
|
"title": "Translate PDF",
|
||||||
|
"description": "Translate your PDF document content to any language using AI.",
|
||||||
|
"shortDesc": "Translate PDF",
|
||||||
|
"targetLang": "Target Language",
|
||||||
|
"resultTitle": "Translation"
|
||||||
|
},
|
||||||
|
"tableExtractor": {
|
||||||
|
"title": "Extract Tables from PDF",
|
||||||
|
"description": "Detect and extract tables from PDF documents into structured data.",
|
||||||
|
"shortDesc": "Extract Tables",
|
||||||
|
"tablesFound": "{{count}} table(s) found",
|
||||||
|
"tablePage": "Page {{page}} — Table {{index}}",
|
||||||
|
"noTables": "No tables were found in this document."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"account": {
|
"account": {
|
||||||
|
|||||||
@@ -383,6 +383,84 @@
|
|||||||
"chatSuggestion3": "Suggérer de meilleurs titres",
|
"chatSuggestion3": "Suggérer de meilleurs titres",
|
||||||
"chatSuggestion4": "Ajouter la gestion des erreurs",
|
"chatSuggestion4": "Ajouter la gestion des erreurs",
|
||||||
"sendMessage": "Envoyer"
|
"sendMessage": "Envoyer"
|
||||||
|
},
|
||||||
|
"compressImage": {
|
||||||
|
"title": "Compresser une image",
|
||||||
|
"description": "Réduisez la taille des images tout en préservant la qualité. Supporte PNG, JPG et WebP.",
|
||||||
|
"shortDesc": "Compresser l'image",
|
||||||
|
"quality": "Qualité"
|
||||||
|
},
|
||||||
|
"pdfToExcel": {
|
||||||
|
"title": "PDF vers Excel",
|
||||||
|
"description": "Extrayez les tableaux des fichiers PDF et convertissez-les en feuilles de calcul Excel.",
|
||||||
|
"shortDesc": "PDF → Excel"
|
||||||
|
},
|
||||||
|
"removeWatermark": {
|
||||||
|
"title": "Supprimer le filigrane",
|
||||||
|
"description": "Supprimez automatiquement les filigranes textuels des fichiers PDF.",
|
||||||
|
"shortDesc": "Supprimer le filigrane"
|
||||||
|
},
|
||||||
|
"reorderPdf": {
|
||||||
|
"title": "Réorganiser les pages PDF",
|
||||||
|
"description": "Réorganisez les pages de votre PDF dans l'ordre souhaité.",
|
||||||
|
"shortDesc": "Réorganiser les pages",
|
||||||
|
"orderLabel": "Nouvel ordre des pages",
|
||||||
|
"orderPlaceholder": "ex. 3,1,2,5,4",
|
||||||
|
"orderHint": "Entrez les numéros de pages séparés par des virgules dans l'ordre souhaité."
|
||||||
|
},
|
||||||
|
"extractPages": {
|
||||||
|
"title": "Extraire des pages PDF",
|
||||||
|
"description": "Extrayez des pages spécifiques d'un PDF dans un nouveau document.",
|
||||||
|
"shortDesc": "Extraire les pages",
|
||||||
|
"pagesLabel": "Pages à extraire",
|
||||||
|
"pagesPlaceholder": "ex. 1,3,5-8",
|
||||||
|
"pagesHint": "Entrez les numéros de pages ou les plages séparés par des virgules."
|
||||||
|
},
|
||||||
|
"qrCode": {
|
||||||
|
"title": "Générateur de code QR",
|
||||||
|
"description": "Générez des codes QR à partir de texte, d'URL ou de données. Personnalisez la taille et téléchargez en PNG.",
|
||||||
|
"shortDesc": "Générer un code QR",
|
||||||
|
"dataLabel": "Texte ou URL",
|
||||||
|
"dataPlaceholder": "Entrez du texte, une URL ou des données...",
|
||||||
|
"sizeLabel": "Taille"
|
||||||
|
},
|
||||||
|
"htmlToPdf": {
|
||||||
|
"title": "HTML vers PDF",
|
||||||
|
"description": "Convertissez des fichiers HTML en documents PDF avec prise en charge complète du style.",
|
||||||
|
"shortDesc": "HTML → PDF"
|
||||||
|
},
|
||||||
|
"chatPdf": {
|
||||||
|
"title": "Discuter avec un PDF",
|
||||||
|
"description": "Posez des questions sur votre document PDF et obtenez des réponses par IA.",
|
||||||
|
"shortDesc": "Demander à l'IA",
|
||||||
|
"questionLabel": "Votre question",
|
||||||
|
"questionPlaceholder": "Que souhaitez-vous savoir sur ce document ?",
|
||||||
|
"answer": "Réponse de l'IA"
|
||||||
|
},
|
||||||
|
"summarizePdf": {
|
||||||
|
"title": "Résumer un PDF",
|
||||||
|
"description": "Obtenez un résumé généré par IA de votre document PDF en quelques secondes.",
|
||||||
|
"shortDesc": "Résumer le PDF",
|
||||||
|
"lengthLabel": "Longueur du résumé",
|
||||||
|
"short": "Court",
|
||||||
|
"medium": "Moyen",
|
||||||
|
"long": "Détaillé",
|
||||||
|
"resultTitle": "Résumé"
|
||||||
|
},
|
||||||
|
"translatePdf": {
|
||||||
|
"title": "Traduire un PDF",
|
||||||
|
"description": "Traduisez le contenu de votre document PDF dans n'importe quelle langue grâce à l'IA.",
|
||||||
|
"shortDesc": "Traduire le PDF",
|
||||||
|
"targetLang": "Langue cible",
|
||||||
|
"resultTitle": "Traduction"
|
||||||
|
},
|
||||||
|
"tableExtractor": {
|
||||||
|
"title": "Extraire les tableaux d'un PDF",
|
||||||
|
"description": "Détectez et extrayez les tableaux des documents PDF en données structurées.",
|
||||||
|
"shortDesc": "Extraire les tableaux",
|
||||||
|
"tablesFound": "{{count}} tableau(x) trouvé(s)",
|
||||||
|
"tablePage": "Page {{page}} — Tableau {{index}}",
|
||||||
|
"noTables": "Aucun tableau n'a été trouvé dans ce document."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"account": {
|
"account": {
|
||||||
|
|||||||
@@ -21,6 +21,13 @@ import {
|
|||||||
GitBranch,
|
GitBranch,
|
||||||
Scaling,
|
Scaling,
|
||||||
ScanText,
|
ScanText,
|
||||||
|
Sheet,
|
||||||
|
ArrowUpDown,
|
||||||
|
QrCode,
|
||||||
|
Code,
|
||||||
|
MessageSquare,
|
||||||
|
Languages,
|
||||||
|
Table,
|
||||||
} from 'lucide-react';
|
} from 'lucide-react';
|
||||||
import ToolCard from '@/components/shared/ToolCard';
|
import ToolCard from '@/components/shared/ToolCard';
|
||||||
import HeroUploadZone from '@/components/shared/HeroUploadZone';
|
import HeroUploadZone from '@/components/shared/HeroUploadZone';
|
||||||
@@ -48,14 +55,25 @@ const pdfTools: ToolInfo[] = [
|
|||||||
{ key: 'unlockPdf', path: '/tools/unlock-pdf', icon: <Unlock className="h-6 w-6 text-green-600" />, bgColor: 'bg-green-50' },
|
{ key: 'unlockPdf', path: '/tools/unlock-pdf', icon: <Unlock className="h-6 w-6 text-green-600" />, bgColor: 'bg-green-50' },
|
||||||
{ key: 'pageNumbers', path: '/tools/page-numbers', icon: <ListOrdered className="h-6 w-6 text-sky-600" />, bgColor: 'bg-sky-50' },
|
{ key: 'pageNumbers', path: '/tools/page-numbers', icon: <ListOrdered className="h-6 w-6 text-sky-600" />, bgColor: 'bg-sky-50' },
|
||||||
{ key: 'pdfFlowchart', path: '/tools/pdf-flowchart', icon: <GitBranch className="h-6 w-6 text-indigo-600" />, bgColor: 'bg-indigo-50' },
|
{ key: 'pdfFlowchart', path: '/tools/pdf-flowchart', icon: <GitBranch className="h-6 w-6 text-indigo-600" />, bgColor: 'bg-indigo-50' },
|
||||||
|
{ key: 'pdfToExcel', path: '/tools/pdf-to-excel', icon: <Sheet className="h-6 w-6 text-green-600" />, bgColor: 'bg-green-50' },
|
||||||
|
{ key: 'removeWatermark', path: '/tools/remove-watermark-pdf', icon: <Droplets className="h-6 w-6 text-rose-600" />, bgColor: 'bg-rose-50' },
|
||||||
|
{ key: 'reorderPdf', path: '/tools/reorder-pdf', icon: <ArrowUpDown className="h-6 w-6 text-violet-600" />, bgColor: 'bg-violet-50' },
|
||||||
|
{ key: 'extractPages', path: '/tools/extract-pages', icon: <FileOutput className="h-6 w-6 text-amber-600" />, bgColor: 'bg-amber-50' },
|
||||||
|
{ key: 'chatPdf', path: '/tools/chat-pdf', icon: <MessageSquare className="h-6 w-6 text-blue-600" />, bgColor: 'bg-blue-50' },
|
||||||
|
{ key: 'summarizePdf', path: '/tools/summarize-pdf', icon: <FileText className="h-6 w-6 text-emerald-600" />, bgColor: 'bg-emerald-50' },
|
||||||
|
{ key: 'translatePdf', path: '/tools/translate-pdf', icon: <Languages className="h-6 w-6 text-purple-600" />, bgColor: 'bg-purple-50' },
|
||||||
|
{ key: 'tableExtractor', path: '/tools/extract-tables', icon: <Table className="h-6 w-6 text-teal-600" />, bgColor: 'bg-teal-50' },
|
||||||
];
|
];
|
||||||
|
|
||||||
/**
 * Non-PDF utility tools listed on the home page (image, video, text, misc).
 * Each entry maps an i18n key to its route, icon, and card background color.
 */
const otherTools: ToolInfo[] = [
  { key: 'imageConvert', path: '/tools/image-converter', icon: <ImageIcon className="h-6 w-6 text-purple-600" />, bgColor: 'bg-purple-50' },
  { key: 'imageResize', path: '/tools/image-resize', icon: <Scaling className="h-6 w-6 text-teal-600" />, bgColor: 'bg-teal-50' },
  { key: 'compressImage', path: '/tools/compress-image', icon: <Minimize2 className="h-6 w-6 text-orange-600" />, bgColor: 'bg-orange-50' },
  { key: 'ocr', path: '/tools/ocr', icon: <ScanText className="h-6 w-6 text-amber-600" />, bgColor: 'bg-amber-50' },
  { key: 'removeBg', path: '/tools/remove-background', icon: <Eraser className="h-6 w-6 text-fuchsia-600" />, bgColor: 'bg-fuchsia-50' },
  { key: 'videoToGif', path: '/tools/video-to-gif', icon: <Film className="h-6 w-6 text-emerald-600" />, bgColor: 'bg-emerald-50' },
  { key: 'qrCode', path: '/tools/qr-code', icon: <QrCode className="h-6 w-6 text-indigo-600" />, bgColor: 'bg-indigo-50' },
  { key: 'htmlToPdf', path: '/tools/html-to-pdf', icon: <Code className="h-6 w-6 text-sky-600" />, bgColor: 'bg-sky-50' },
  { key: 'wordCounter', path: '/tools/word-counter', icon: <Hash className="h-6 w-6 text-blue-600" />, bgColor: 'bg-blue-50' },
  { key: 'textCleaner', path: '/tools/text-cleaner', icon: <Eraser className="h-6 w-6 text-indigo-600" />, bgColor: 'bg-indigo-50' },
];
|
||||||
|
|||||||
@@ -79,6 +79,15 @@ export interface TaskResult {
|
|||||||
// OCR-specific fields
|
// OCR-specific fields
|
||||||
text?: string;
|
text?: string;
|
||||||
char_count?: number;
|
char_count?: number;
|
||||||
|
// AI PDF fields
|
||||||
|
reply?: string;
|
||||||
|
summary?: string;
|
||||||
|
translation?: string;
|
||||||
|
target_language?: string;
|
||||||
|
pages_analyzed?: number;
|
||||||
|
// Table extraction fields
|
||||||
|
tables?: Array<{ page: number; table_index: number; headers: string[]; rows: string[][] }>;
|
||||||
|
tables_found?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface AuthUser {
|
export interface AuthUser {
|
||||||
|
|||||||
@@ -17,6 +17,11 @@ import {
|
|||||||
GitBranch,
|
GitBranch,
|
||||||
Scaling,
|
Scaling,
|
||||||
ScanText,
|
ScanText,
|
||||||
|
Sheet,
|
||||||
|
ArrowUpDown,
|
||||||
|
MessageSquare,
|
||||||
|
Languages,
|
||||||
|
Table,
|
||||||
} from 'lucide-react';
|
} from 'lucide-react';
|
||||||
import type { ComponentType, SVGProps } from 'react';
|
import type { ComponentType, SVGProps } from 'react';
|
||||||
|
|
||||||
@@ -48,6 +53,14 @@ const pdfTools: ToolOption[] = [
|
|||||||
{ key: 'pdfEditor', path: '/tools/pdf-editor', icon: PenLine, bgColor: 'bg-rose-100 dark:bg-rose-900/30', iconColor: 'text-rose-600 dark:text-rose-400' },
|
{ key: 'pdfEditor', path: '/tools/pdf-editor', icon: PenLine, bgColor: 'bg-rose-100 dark:bg-rose-900/30', iconColor: 'text-rose-600 dark:text-rose-400' },
|
||||||
{ key: 'pdfFlowchart', path: '/tools/pdf-flowchart', icon: GitBranch, bgColor: 'bg-indigo-100 dark:bg-indigo-900/30', iconColor: 'text-indigo-600 dark:text-indigo-400' },
|
{ key: 'pdfFlowchart', path: '/tools/pdf-flowchart', icon: GitBranch, bgColor: 'bg-indigo-100 dark:bg-indigo-900/30', iconColor: 'text-indigo-600 dark:text-indigo-400' },
|
||||||
{ key: 'ocr', path: '/tools/ocr', icon: ScanText, bgColor: 'bg-amber-100 dark:bg-amber-900/30', iconColor: 'text-amber-600 dark:text-amber-400' },
|
{ key: 'ocr', path: '/tools/ocr', icon: ScanText, bgColor: 'bg-amber-100 dark:bg-amber-900/30', iconColor: 'text-amber-600 dark:text-amber-400' },
|
||||||
|
{ key: 'pdfToExcel', path: '/tools/pdf-to-excel', icon: Sheet, bgColor: 'bg-green-100 dark:bg-green-900/30', iconColor: 'text-green-600 dark:text-green-400' },
|
||||||
|
{ key: 'removeWatermark', path: '/tools/remove-watermark-pdf', icon: Droplets, bgColor: 'bg-rose-100 dark:bg-rose-900/30', iconColor: 'text-rose-600 dark:text-rose-400' },
|
||||||
|
{ key: 'reorderPdf', path: '/tools/reorder-pdf', icon: ArrowUpDown, bgColor: 'bg-violet-100 dark:bg-violet-900/30', iconColor: 'text-violet-600 dark:text-violet-400' },
|
||||||
|
{ key: 'extractPages', path: '/tools/extract-pages', icon: FileOutput, bgColor: 'bg-amber-100 dark:bg-amber-900/30', iconColor: 'text-amber-600 dark:text-amber-400' },
|
||||||
|
{ key: 'chatPdf', path: '/tools/chat-pdf', icon: MessageSquare, bgColor: 'bg-blue-100 dark:bg-blue-900/30', iconColor: 'text-blue-600 dark:text-blue-400' },
|
||||||
|
{ key: 'summarizePdf', path: '/tools/summarize-pdf', icon: FileText, bgColor: 'bg-emerald-100 dark:bg-emerald-900/30', iconColor: 'text-emerald-600 dark:text-emerald-400' },
|
||||||
|
{ key: 'translatePdf', path: '/tools/translate-pdf', icon: Languages, bgColor: 'bg-purple-100 dark:bg-purple-900/30', iconColor: 'text-purple-600 dark:text-purple-400' },
|
||||||
|
{ key: 'tableExtractor', path: '/tools/extract-tables', icon: Table, bgColor: 'bg-teal-100 dark:bg-teal-900/30', iconColor: 'text-teal-600 dark:text-teal-400' },
|
||||||
];
|
];
|
||||||
|
|
||||||
/** Image tools available when an image is uploaded */
|
/** Image tools available when an image is uploaded */
|
||||||
@@ -57,6 +70,7 @@ const imageTools: ToolOption[] = [
|
|||||||
{ key: 'ocr', path: '/tools/ocr', icon: ScanText, bgColor: 'bg-amber-100 dark:bg-amber-900/30', iconColor: 'text-amber-600 dark:text-amber-400' },
|
{ key: 'ocr', path: '/tools/ocr', icon: ScanText, bgColor: 'bg-amber-100 dark:bg-amber-900/30', iconColor: 'text-amber-600 dark:text-amber-400' },
|
||||||
{ key: 'removeBg', path: '/tools/remove-background', icon: ImageIcon, bgColor: 'bg-fuchsia-100 dark:bg-fuchsia-900/30', iconColor: 'text-fuchsia-600 dark:text-fuchsia-400' },
|
{ key: 'removeBg', path: '/tools/remove-background', icon: ImageIcon, bgColor: 'bg-fuchsia-100 dark:bg-fuchsia-900/30', iconColor: 'text-fuchsia-600 dark:text-fuchsia-400' },
|
||||||
{ key: 'imagesToPdf', path: '/tools/images-to-pdf', icon: FileImage, bgColor: 'bg-lime-100 dark:bg-lime-900/30', iconColor: 'text-lime-600 dark:text-lime-400' },
|
{ key: 'imagesToPdf', path: '/tools/images-to-pdf', icon: FileImage, bgColor: 'bg-lime-100 dark:bg-lime-900/30', iconColor: 'text-lime-600 dark:text-lime-400' },
|
||||||
|
{ key: 'compressImage', path: '/tools/compress-image', icon: Minimize2, bgColor: 'bg-orange-100 dark:bg-orange-900/30', iconColor: 'text-orange-600 dark:text-orange-400' },
|
||||||
];
|
];
|
||||||
|
|
||||||
/** Video tools available when a video is uploaded */
|
/** Video tools available when a video is uploaded */
|
||||||
|
|||||||
Reference in New Issue
Block a user