templates: add multi-category filter (repeatable or CSV) to GET /api/templates/search; add has_keywords filter; add categories listing endpoint with counts; update docs; add tests

This commit is contained in:
HotSwapp
2025-08-15 15:04:40 -05:00
parent 21c6b285d6
commit e3a279dba7
17 changed files with 3727 additions and 2 deletions

View File

@@ -44,7 +44,7 @@ IMPORT_ORDER = [
"TRNSTYPE.csv", "TRNSLKUP.csv", "FOOTERS.csv", "SETUP.csv", "PRINTERS.csv", "TRNSTYPE.csv", "TRNSLKUP.csv", "FOOTERS.csv", "SETUP.csv", "PRINTERS.csv",
"INX_LKUP.csv", "INX_LKUP.csv",
"ROLODEX.csv", "PHONE.csv", "FILES.csv", "LEDGER.csv", "TRNSACTN.csv", "ROLODEX.csv", "PHONE.csv", "FILES.csv", "LEDGER.csv", "TRNSACTN.csv",
"QDROS.csv", "PENSIONS.csv", "LIFETABL.csv", "NUMBERAL.csv", "PLANINFO.csv", "RESULTS.csv", "PAYMENTS.csv", "DEPOSITS.csv", "QDROS.csv", "PENSIONS.csv", "SCHEDULE.csv", "MARRIAGE.csv", "DEATH.csv", "SEPARATE.csv", "LIFETABL.csv", "NUMBERAL.csv", "PLANINFO.csv", "RESULTS.csv", "PAYMENTS.csv", "DEPOSITS.csv",
"FILENOTS.csv", "FORM_INX.csv", "FORM_LST.csv", "FVARLKUP.csv", "RVARLKUP.csv" "FILENOTS.csv", "FORM_INX.csv", "FORM_LST.csv", "FVARLKUP.csv", "RVARLKUP.csv"
] ]
@@ -720,7 +720,9 @@ def convert_value(value: str, field_name: str) -> Any:
value = value.strip() value = value.strip()
# Date fields # Date fields
if any(word in field_name.lower() for word in ["date", "dob", "birth", "opened", "closed", "judgment", "valuation", "married", "vests_on"]): if any(word in field_name.lower() for word in [
"date", "dob", "birth", "opened", "closed", "judgment", "valuation", "married", "vests_on", "service"
]):
parsed_date = parse_date(value) parsed_date = parse_date(value)
return parsed_date return parsed_date

1024
app/api/pensions.py Normal file

File diff suppressed because it is too large Load Diff

471
app/api/templates.py Normal file
View File

@@ -0,0 +1,471 @@
"""
Document Template API (MVP)
Endpoints:
- POST /api/templates/upload
- GET /api/templates/search
- GET /api/templates/{id}
- POST /api/templates/{id}/versions
- GET /api/templates/{id}/versions
- POST /api/templates/{id}/preview
"""
from __future__ import annotations
from typing import List, Optional, Dict, Any
from fastapi import APIRouter, Depends, HTTPException, status, UploadFile, File, Form, Query
from sqlalchemy.orm import Session
from sqlalchemy import func, or_, exists
import hashlib
from app.database.base import get_db
from app.auth.security import get_current_user
from app.models.user import User
from app.models.templates import DocumentTemplate, DocumentTemplateVersion, TemplateKeyword
from app.services.storage import get_default_storage
from app.services.template_merge import extract_tokens_from_bytes, build_context, resolve_tokens, render_docx
router = APIRouter()
from pydantic import BaseModel, Field
class TemplateResponse(BaseModel):
    """Serialized representation of a document template."""

    id: int
    name: str
    description: Optional[str] = None
    category: Optional[str] = None
    active: bool
    # Id of the currently approved version, if the template has one.
    current_version_id: Optional[int] = None
class VersionResponse(BaseModel):
    """Serialized representation of a single template version."""

    id: int
    template_id: int
    semantic_version: str
    mime_type: str
    size: int
    # SHA-256 hex digest of the stored file bytes.
    checksum: str
    changelog: Optional[str] = None
    is_approved: bool
class SearchResponseItem(BaseModel):
    """Lightweight search-result row for GET /api/templates/search."""

    id: int
    name: str
    category: Optional[str] = None
    active: bool
    # Semantic version string of the template's current version, if any.
    latest_version: Optional[str] = None
class KeywordsRequest(BaseModel):
    """Request body carrying keywords to attach to a template."""

    keywords: List[str]
class KeywordsResponse(BaseModel):
    """Response body listing a template's keywords."""

    keywords: List[str]
class PreviewRequest(BaseModel):
    """Request body for a template merge preview."""

    # Merge context values keyed by token name.
    context: Dict[str, Any] = Field(default_factory=dict)
    # Specific version to preview; defaults to the template's current version.
    version_id: Optional[int] = None
class PreviewResponse(BaseModel):
    """Result of a preview merge: token resolution state plus output metadata."""

    resolved: Dict[str, Any]
    # Token names that could not be resolved against the context.
    unresolved: List[str]
    output_mime_type: str
    output_size: int
class CategoryCount(BaseModel):
    """One row of the categories listing: a category name and its template count."""

    # None represents templates with no category assigned.
    category: Optional[str] = None
    count: int
@router.post("/upload", response_model=TemplateResponse)
async def upload_template(
    name: str = Form(...),
    category: Optional[str] = Form("GENERAL"),
    description: Optional[str] = Form(None),
    semantic_version: str = Form("1.0.0"),
    file: UploadFile = File(...),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Upload a new template (.docx or .pdf) and register its first version.

    The uploaded bytes are written to the default storage backend; the first
    version is auto-approved and becomes the template's current version.
    """
    allowed_types = {
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "application/pdf",
    }
    if file.content_type not in allowed_types:
        raise HTTPException(status_code=400, detail="Only .docx or .pdf templates are supported")
    content = await file.read()
    if not content:
        raise HTTPException(status_code=400, detail="No file uploaded")
    digest = hashlib.sha256(content).hexdigest()
    backend = get_default_storage()
    stored_path = backend.save_bytes(
        content=content,
        filename_hint=file.filename or "template.bin",
        subdir="templates",
    )
    creator = getattr(current_user, "username", None)
    template = DocumentTemplate(
        name=name,
        description=description,
        category=category,
        active=True,
        created_by=creator,
    )
    db.add(template)
    db.flush()  # assigns template.id so the version row can reference it
    first_version = DocumentTemplateVersion(
        template_id=template.id,
        semantic_version=semantic_version,
        storage_path=stored_path,
        mime_type=file.content_type,
        size=len(content),
        checksum=digest,
        changelog=None,
        created_by=creator,
        is_approved=True,
    )
    db.add(first_version)
    db.flush()
    template.current_version_id = first_version.id
    db.commit()
    db.refresh(template)
    return TemplateResponse(
        id=template.id,
        name=template.name,
        description=template.description,
        category=template.category,
        active=template.active,
        current_version_id=template.current_version_id,
    )
@router.get("/search", response_model=List[SearchResponseItem])
async def search_templates(
    q: Optional[str] = None,
    category: Optional[List[str]] = Query(
        None,
        description=(
            "Filter by category. Repeat the parameter (e.g., ?category=A&category=B) "
            "or pass a comma-separated list (e.g., ?category=A,B)."
        ),
    ),
    keywords: Optional[List[str]] = Query(None),
    keywords_mode: str = Query("any", description="Keyword match mode: any|all (default any)"),
    has_keywords: Optional[bool] = Query(
        None,
        description=(
            "When true, only templates that have one or more keywords are returned; "
            "when false, only templates with no keywords are returned."
        ),
    ),
    skip: int = Query(0, ge=0),
    limit: int = Query(50, ge=1, le=200),
    sort_by: Optional[str] = Query("name", description="Sort by: name | category | updated"),
    sort_dir: Optional[str] = Query("asc", description="Sort direction: asc or desc"),
    active_only: bool = Query(True, description="When true (default), only active templates are returned"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Search templates by text, categories, keywords, and keyword presence.

    Returns a paginated list of search items; ``latest_version`` is the
    semantic version of each template's current (approved) version, if any.
    Unknown ``keywords_mode``/``sort_by``/``sort_dir`` values fall back to
    their defaults rather than erroring.
    """
    query = db.query(DocumentTemplate)
    if active_only:
        query = query.filter(DocumentTemplate.active == True)  # noqa: E712
    if q:
        like = f"%{q}%"
        query = query.filter(
            or_(
                DocumentTemplate.name.ilike(like),
                DocumentTemplate.description.ilike(like),
            )
        )
    # Category filtering (supports a repeatable param and CSV within each value)
    if category:
        categories = {
            part.strip()
            for value in category
            for part in (value or "").split(",")
            if part.strip()
        }
        if categories:
            query = query.filter(DocumentTemplate.category.in_(sorted(categories)))
    if keywords:
        unique_keywords = sorted({kw.strip().lower() for kw in keywords if kw and kw.strip()})
        if unique_keywords:
            mode = (keywords_mode or "any").lower()
            if mode not in ("any", "all"):
                mode = "any"
            query = query.join(TemplateKeyword, TemplateKeyword.template_id == DocumentTemplate.id)
            if mode == "any":
                query = query.filter(TemplateKeyword.keyword.in_(unique_keywords)).distinct()
            else:
                # "all": a template must match every requested keyword
                query = query.filter(TemplateKeyword.keyword.in_(unique_keywords))
                query = query.group_by(DocumentTemplate.id)
                query = query.having(func.count(func.distinct(TemplateKeyword.keyword)) == len(unique_keywords))
    # Has-keywords filter (independent of specific keyword matches)
    if has_keywords is not None:
        kw_exists = exists().where(TemplateKeyword.template_id == DocumentTemplate.id)
        query = query.filter(kw_exists if has_keywords else ~kw_exists)
    # Sorting (whitelisted sort keys only)
    sort_key = (sort_by or "name").lower()
    direction = (sort_dir or "asc").lower()
    if sort_key not in ("name", "category", "updated"):
        sort_key = "name"
    if direction not in ("asc", "desc"):
        direction = "asc"
    if sort_key == "name":
        order_col = DocumentTemplate.name
    elif sort_key == "category":
        order_col = DocumentTemplate.category
    else:  # updated: fall back to created_at for rows never updated
        order_col = func.coalesce(DocumentTemplate.updated_at, DocumentTemplate.created_at)
    query = query.order_by(order_col.asc() if direction == "asc" else order_col.desc())
    # Pagination
    templates = query.offset(skip).limit(limit).all()
    # Batch-load current versions in ONE query instead of one per template (N+1 fix).
    version_ids = [tpl.current_version_id for tpl in templates if tpl.current_version_id]
    semver_by_id: Dict[int, str] = {}
    if version_ids:
        rows = (
            db.query(DocumentTemplateVersion.id, DocumentTemplateVersion.semantic_version)
            .filter(DocumentTemplateVersion.id.in_(version_ids))
            .all()
        )
        semver_by_id = dict(rows)
    return [
        SearchResponseItem(
            id=tpl.id,
            name=tpl.name,
            category=tpl.category,
            active=tpl.active,
            # .get(None) safely yields None for templates without a current version
            latest_version=semver_by_id.get(tpl.current_version_id),
        )
        for tpl in templates
    ]
@router.get("/categories", response_model=List[CategoryCount])
async def list_template_categories(
    active_only: bool = Query(True, description="When true (default), only active templates are counted"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """List distinct template categories with the number of templates in each."""
    counts = db.query(DocumentTemplate.category, func.count(DocumentTemplate.id).label("count"))
    if active_only:
        counts = counts.filter(DocumentTemplate.active == True)  # noqa: E712
    grouped = (
        counts.group_by(DocumentTemplate.category)
        .order_by(DocumentTemplate.category.asc())
        .all()
    )
    return [CategoryCount(category=cat, count=total) for cat, total in grouped]
@router.get("/{template_id}", response_model=TemplateResponse)
async def get_template(
    template_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Fetch a single template by id; 404 when it does not exist."""
    record = db.query(DocumentTemplate).filter(DocumentTemplate.id == template_id).first()
    if record is None:
        raise HTTPException(status_code=404, detail="Template not found")
    return TemplateResponse(
        id=record.id,
        name=record.name,
        description=record.description,
        category=record.category,
        active=record.active,
        current_version_id=record.current_version_id,
    )
@router.get("/{template_id}/versions", response_model=List[VersionResponse])
async def list_versions(
    template_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Return every version of a template, newest first."""
    rows = (
        db.query(DocumentTemplateVersion)
        .filter(DocumentTemplateVersion.template_id == template_id)
        .order_by(DocumentTemplateVersion.created_at.desc())
        .all()
    )
    results: List[VersionResponse] = []
    for row in rows:
        results.append(
            VersionResponse(
                id=row.id,
                template_id=row.template_id,
                semantic_version=row.semantic_version,
                mime_type=row.mime_type,
                size=row.size,
                checksum=row.checksum,
                changelog=row.changelog,
                is_approved=row.is_approved,
            )
        )
    return results
@router.post("/{template_id}/versions", response_model=VersionResponse)
async def add_version(
    template_id: int,
    semantic_version: str = Form("1.0.0"),
    changelog: Optional[str] = Form(None),
    approve: bool = Form(True),
    file: UploadFile = File(...),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Attach a new version to an existing template.

    When ``approve`` is true the new version also becomes the template's
    current version.
    """
    parent = db.query(DocumentTemplate).filter(DocumentTemplate.id == template_id).first()
    if parent is None:
        raise HTTPException(status_code=404, detail="Template not found")
    payload_bytes = await file.read()
    if not payload_bytes:
        raise HTTPException(status_code=400, detail="No file uploaded")
    digest = hashlib.sha256(payload_bytes).hexdigest()
    backend = get_default_storage()
    location = backend.save_bytes(
        content=payload_bytes,
        filename_hint=file.filename or "template.bin",
        subdir="templates",
    )
    new_version = DocumentTemplateVersion(
        template_id=template_id,
        semantic_version=semantic_version,
        storage_path=location,
        mime_type=file.content_type,
        size=len(payload_bytes),
        checksum=digest,
        changelog=changelog,
        created_by=getattr(current_user, "username", None),
        is_approved=bool(approve),
    )
    db.add(new_version)
    db.flush()
    if approve:
        # Promote the freshly approved version to be the template's current one.
        parent.current_version_id = new_version.id
    db.commit()
    return VersionResponse(
        id=new_version.id,
        template_id=new_version.template_id,
        semantic_version=new_version.semantic_version,
        mime_type=new_version.mime_type,
        size=new_version.size,
        checksum=new_version.checksum,
        changelog=new_version.changelog,
        is_approved=new_version.is_approved,
    )
@router.post("/{template_id}/preview", response_model=PreviewResponse)
async def preview_template(
    template_id: int,
    payload: PreviewRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Dry-run a template merge and report token resolution state.

    Resolves the template's merge tokens against the supplied context and,
    for .docx templates, renders the output to measure its size. The rendered
    bytes are NOT persisted; only metadata and the resolved/unresolved token
    sets are returned.

    Raises 404 when the template or requested version is missing, 400 when
    the template has no versions at all.
    """
    tpl = db.query(DocumentTemplate).filter(DocumentTemplate.id == template_id).first()
    if not tpl:
        raise HTTPException(status_code=404, detail="Template not found")
    # Explicit version wins; otherwise preview the template's current version.
    version_id = payload.version_id or tpl.current_version_id
    if not version_id:
        raise HTTPException(status_code=400, detail="Template has no versions")
    ver = db.query(DocumentTemplateVersion).filter(DocumentTemplateVersion.id == version_id).first()
    if not ver:
        raise HTTPException(status_code=404, detail="Version not found")
    storage = get_default_storage()
    content = storage.open_bytes(ver.storage_path)
    tokens = extract_tokens_from_bytes(content)
    context = build_context(payload.context or {})
    resolved, unresolved = resolve_tokens(db, tokens, context)
    output_bytes = content
    if ver.mime_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        # Only .docx templates are actually rendered; other types (PDF) pass
        # through untouched. The output mime type equals the source mime type
        # in both cases, so no separate tracking variable is needed.
        output_bytes = render_docx(content, resolved)
    return PreviewResponse(
        resolved=resolved,
        unresolved=unresolved,
        output_mime_type=ver.mime_type,
        output_size=len(output_bytes),
    )
@router.get("/{template_id}/keywords", response_model=KeywordsResponse)
async def list_keywords(
    template_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Return the template's keywords in alphabetical order."""
    template = db.query(DocumentTemplate).filter(DocumentTemplate.id == template_id).first()
    if template is None:
        raise HTTPException(status_code=404, detail="Template not found")
    rows = (
        db.query(TemplateKeyword)
        .filter(TemplateKeyword.template_id == template_id)
        .order_by(TemplateKeyword.keyword.asc())
        .all()
    )
    return KeywordsResponse(keywords=[row.keyword for row in rows])
@router.post("/{template_id}/keywords", response_model=KeywordsResponse)
async def add_keywords(
    template_id: int,
    payload: KeywordsRequest,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Attach keywords to a template (normalized to lowercase, de-duplicated).

    Blank entries are ignored and already-present keywords are skipped.
    Returns the template's full keyword list after the additions.
    """
    tpl = db.query(DocumentTemplate).filter(DocumentTemplate.id == template_id).first()
    if not tpl:
        raise HTTPException(status_code=404, detail="Template not found")
    requested = {(kw or "").strip().lower() for kw in (payload.keywords or [])}
    requested.discard("")
    # Load the template's existing keywords once instead of issuing one
    # existence query per requested keyword (N+1). Also avoids the previous
    # local variable `exists`, which shadowed sqlalchemy's `exists` import.
    existing = {
        row.keyword
        for row in db.query(TemplateKeyword)
        .filter(TemplateKeyword.template_id == template_id)
        .all()
    }
    to_add = [
        TemplateKeyword(template_id=template_id, keyword=kw)
        for kw in sorted(requested - existing)
    ]
    if to_add:
        db.add_all(to_add)
        db.commit()
    kws = (
        db.query(TemplateKeyword)
        .filter(TemplateKeyword.template_id == template_id)
        .order_by(TemplateKeyword.keyword.asc())
        .all()
    )
    return KeywordsResponse(keywords=[k.keyword for k in kws])
@router.delete("/{template_id}/keywords/{keyword}", response_model=KeywordsResponse)
async def remove_keyword(
    template_id: int,
    keyword: str,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
):
    """Delete one keyword (matched case-insensitively) and return the rest."""
    template = db.query(DocumentTemplate).filter(DocumentTemplate.id == template_id).first()
    if template is None:
        raise HTTPException(status_code=404, detail="Template not found")
    target = (keyword or "").strip().lower()
    if target:
        db.query(TemplateKeyword).filter(
            TemplateKeyword.template_id == template_id,
            TemplateKeyword.keyword == target,
        ).delete(synchronize_session=False)
        db.commit()
    remaining = (
        db.query(TemplateKeyword)
        .filter(TemplateKeyword.template_id == template_id)
        .order_by(TemplateKeyword.keyword.asc())
        .all()
    )
    return KeywordsResponse(keywords=[row.keyword for row in remaining])

View File

@@ -93,6 +93,7 @@ def ensure_schema_updates(engine: Engine) -> None:
"pension_schedules": { "pension_schedules": {
"vests_on": "DATE", "vests_on": "DATE",
"vests_at": "FLOAT", "vests_at": "FLOAT",
"version": "VARCHAR(10)",
}, },
"marriage_history": { "marriage_history": {
"married_from": "DATE", "married_from": "DATE",
@@ -102,6 +103,7 @@ def ensure_schema_updates(engine: Engine) -> None:
"service_to": "DATE", "service_to": "DATE",
"service_years": "FLOAT", "service_years": "FLOAT",
"marital_percent": "FLOAT", "marital_percent": "FLOAT",
"version": "VARCHAR(10)",
}, },
"death_benefits": { "death_benefits": {
"lump1": "FLOAT", "lump1": "FLOAT",
@@ -110,6 +112,10 @@ def ensure_schema_updates(engine: Engine) -> None:
"growth2": "FLOAT", "growth2": "FLOAT",
"disc1": "FLOAT", "disc1": "FLOAT",
"disc2": "FLOAT", "disc2": "FLOAT",
"version": "VARCHAR(10)",
},
"separation_agreements": {
"version": "VARCHAR(10)",
}, },
} }

View File

@@ -90,6 +90,7 @@ from app.api.flexible import router as flexible_router
from app.api.support import router as support_router from app.api.support import router as support_router
from app.api.settings import router as settings_router from app.api.settings import router as settings_router
from app.api.mortality import router as mortality_router from app.api.mortality import router as mortality_router
from app.api.pensions import router as pensions_router
logger.info("Including API routers") logger.info("Including API routers")
app.include_router(auth_router, prefix="/api/auth", tags=["authentication"]) app.include_router(auth_router, prefix="/api/auth", tags=["authentication"])
@@ -104,6 +105,7 @@ app.include_router(support_router, prefix="/api/support", tags=["support"])
app.include_router(settings_router, prefix="/api/settings", tags=["settings"]) app.include_router(settings_router, prefix="/api/settings", tags=["settings"])
app.include_router(flexible_router, prefix="/api") app.include_router(flexible_router, prefix="/api")
app.include_router(mortality_router, prefix="/api/mortality", tags=["mortality"]) app.include_router(mortality_router, prefix="/api/mortality", tags=["mortality"])
app.include_router(pensions_router, prefix="/api/pensions", tags=["pensions"])
@app.get("/", response_class=HTMLResponse) @app.get("/", response_class=HTMLResponse)

View File

@@ -81,6 +81,7 @@ class MarriageHistory(BaseModel):
id = Column(Integer, primary_key=True, autoincrement=True) id = Column(Integer, primary_key=True, autoincrement=True)
file_no = Column(String(45), ForeignKey("files.file_no"), nullable=False) file_no = Column(String(45), ForeignKey("files.file_no"), nullable=False)
version = Column(String(10), default="01")
# Marriage details # Marriage details
marriage_date = Column(Date) # Date of marriage marriage_date = Column(Date) # Date of marriage
@@ -110,6 +111,7 @@ class DeathBenefit(BaseModel):
id = Column(Integer, primary_key=True, autoincrement=True) id = Column(Integer, primary_key=True, autoincrement=True)
file_no = Column(String(45), ForeignKey("files.file_no"), nullable=False) file_no = Column(String(45), ForeignKey("files.file_no"), nullable=False)
version = Column(String(10), default="01")
# Death benefit details # Death benefit details
beneficiary_name = Column(String(100)) # Beneficiary name beneficiary_name = Column(String(100)) # Beneficiary name
@@ -138,6 +140,7 @@ class SeparationAgreement(BaseModel):
id = Column(Integer, primary_key=True, autoincrement=True) id = Column(Integer, primary_key=True, autoincrement=True)
file_no = Column(String(45), ForeignKey("files.file_no"), nullable=False) file_no = Column(String(45), ForeignKey("files.file_no"), nullable=False)
version = Column(String(10), default="01")
# Agreement details # Agreement details
agreement_date = Column(Date) # Date of agreement agreement_date = Column(Date) # Date of agreement

View File

@@ -0,0 +1,411 @@
# Legacy System Analysis: Delphi Consulting Group Database
## Executive Summary
This document provides a comprehensive analysis of the legacy Paradox-based database system used by Delphi Consulting Group, based on examination of the .sc (script) files found in the `old database` folder. The legacy system was a sophisticated legal practice management application specialized for family law attorneys handling divorce cases involving pension plan division (QDROs).
## System Architecture Overview
### Technology Stack
- **Platform**: Paradox Database Management System
- **Language**: PAL (Paradox Application Language)
- **Integration**: WordPerfect document assembly
- **File Structure**: Modular .SC script files with shared procedure library
### Core Directory Structure
```
R:\PDOXDATA\OFFICE\ # Main data directory
R:\DOCUMENT\WPDOCS\ # WordPerfect documents
R:\PRIVATE\ # Temporary/merge files
OFFICE.LIB # Shared procedure library
```
## Module-by-Module Analysis
### 1. Main Application Entry (`Main_RH.SC`)
**Purpose**: Application bootstrap and main menu system
**Key Features**:
- Splash screen with company branding
- Hierarchical pulldown menu system
- Library and configuration management
- Keyboard shortcut definitions
**Menu Structure**:
```
Open
├── Rolodex (contact management)
├── File Cabinet (case management)
├── QDRO Screen (pension division documents)
├── Plan Information (retirement plan database)
├── Annuity Evaluator (present value calculations)
└── Deposit Book (banking records)
Utilities
├── Basic Data (lookup tables)
└── Tally Accounts (financial reconciliation)
System
├── Customize (letterhead/branding)
└── Printers (output configuration)
```
**Startup Sequence**:
1. Load procedure library (OFFICE.LIB)
2. Initialize directory paths and variables
3. Display animated splash screen
4. Launch main menu loop
5. Handle user selections via switch statements
### 2. Contact Management (`ROLODEX.SC`)
**Purpose**: Comprehensive contact database for clients, opposing counsel, and related parties
**Data Structure**:
- **Primary Table**: Rolodex (main contact info)
- **Related Tables**: Phone (multiple numbers per contact)
- **Indexing**: By ID, last name, group classification
**Key Features**:
**Multi-Form Interface**:
- Form 1: Standard contact entry
- Form 4: Detailed view with extended information
- Alt-F: Toggle between forms
**Search Capabilities**:
- Multi-criteria search dialog
- ID, name, address, phone number filters
- Group-based filtering (client, opposing counsel, personal)
- Phone number cross-referencing
**Integration Points**:
- File cabinet owner/opposing party lookups
- Document assembly merge fields
- Envelope and label generation
- Phone book report generation
**Report Types**:
- Envelope generation
- Phone book (numbers only)
- Phone book with addresses
- Complete rolodex information
### 3. File Cabinet System (`FILCABNT.SC`)
**Purpose**: Case management with integrated time and billing
**Architecture**: Master-Detail relationship
- **Master**: Files table (case information, account summaries)
- **Detail**: Ledger table (time entries, disbursements, payments)
**File Management Features**:
**Case Information**:
- Unique file numbers
- Client assignment (linked to Rolodex)
- Area of law classification
- Open/close date tracking
- Status management (ACTIVE, INACTIVE, FOLLOW UP, etc.)
- Opposing counsel assignment
- Billing rate assignment by employee
**Account Management**:
- Real-time balance calculations
- Trust account tracking
- Hourly fees, flat fees, disbursements
- Credit/payment tracking
- Transferable amount calculations (trust → fees)
- Previously billed vs. current balance separation
**Advanced Features**:
**Time Tracking**:
- Built-in timer (Alt-T start/stop)
- Elapsed time display with hours:minutes:seconds
- Automatic time entry integration
**Account Operations**:
- Account closure (Alt-C): Automatically closes file and creates payment entry
- Account reopening (Alt-R): Reverses closure process
- Balance summarization (Alt-B): Popup with detailed breakdowns
**Search and Reporting**:
- Complex multi-criteria search
- Account balance filters
- File status filtering (SEND/HOLD for statement printing)
- Multiple report formats (detailed, summary, statements)
### 4. Ledger/Billing System (`LEDGER.SC`)
**Purpose**: Transaction-level time and billing management
**Transaction Types**:
1. **Type 1**: Trust account entries
2. **Type 2**: Hourly time entries (quantity × rate)
3. **Type 3**: Flat fee charges
4. **Type 4**: Disbursements/expenses
5. **Type 5**: Credits/payments
**Data Entry Features**:
**Automatic Calculations**:
- Quantity × Rate = Amount for hourly entries
- Employee rate lookups from Employee table
- Real-time amount recalculation on field changes
**Validation and Business Rules**:
- Required fields: Date, Transaction Code, Employee, Amount
- Automatic item number assignment (handles key violations)
- Billed status tracking (Y/N)
- Default employee assignment from parent file
**Advanced Entry Features**:
- Insert new records (INS key) with date inheritance
- Quick billed status toggle (Alt-Y/Alt-N)
- Date increment/decrement (+/- keys)
- Integrated balance summarization
**Account Reconciliation**:
- Real-time account total updates on record changes
- Separation of billed vs. unbilled amounts
- Trust account balance calculations
- Transferable amount determination
### 5. QDRO Module (`QDRO.SC`)
**Purpose**: Qualified Domestic Relations Order document preparation
**Specialized Features**:
- QDRO-specific data entry forms
- Integration with document assembly system
- Version tracking for QDRO revisions
- User-specific output table generation
**Document Generation Process**:
1. Data entry in QDRO table
2. Copy to temporary output table (Q_Output or user-specific)
3. File number versioning ([File_No] + [Version])
4. Integration with form selection system
5. Automatic cleanup of temporary tables
### 6. Setup and Configuration (`SETUP.SC`)
**Purpose**: Lookup table management and system configuration
**Managed Tables**:
**Basic Data Tables**:
- **FileType**: Areas of law (divorce, personal injury, etc.)
- **Employee**: Attorney/staff information with billing rates
- **TrnsType**: Transaction groupings for accounting
- **Footers**: Statement footer text templates
- **GrupLkup**: Rolodex group classifications
- **TrnsLkup**: Transaction codes and descriptions
- **FileStat**: File status definitions with print settings
- **States**: State abbreviations and full names
- **Printers**: Printer configuration and setup strings
**Table Management Features**:
- Uniform editing interface across all lookup tables
- Field-specific prompts and help text
- Form-based entry with validation
- Print report capabilities for each table
- Context-sensitive speedbar help
**Special Processing**:
- **Deposits**: Automatic payment totaling
- **Date manipulation**: +/- key support for date fields
- **Printer setup**: Integration with printer configuration
### 7. Document Assembly System (`FORM_MGR.SC`)
**Purpose**: Sophisticated legal document generation system
**Architecture**:
- **Form Library**: Searchable database of document templates
- **Index System**: Keyword-based form categorization
- **Merge Process**: Data extraction and WordPerfect integration
**Form Management**:
**Form Selection Interface**:
- Directory browsing with form file listings
- Form description database integration
- Multi-form selection capabilities
- Search by name, description, status, keywords
**Search Capabilities**:
- Text-based searching (name, description, status)
- Keyword index searching with dynamic arrays
- Boolean search combinations
- User-friendly selection interface
**Document Generation Process**:
1. **Form Selection**: Browse and select multiple forms
2. **Data Extraction**: Query current data set for merge fields
3. **Configuration File**: Generate merge instruction file
4. **External Processing**: Execute DOS batch file (GO.BAT)
5. **WordPerfect Integration**: Automatic document assembly
**Technical Implementation**:
- Dynamic array management for form lists
- File system integration for template discovery
- Automatic data export for merge process
- Error handling for missing templates
### 8. Utility Functions (`UTILITY.SC`)
**Purpose**: Shared utility procedures used throughout the system
**Core Utilities**:
**Mode Management**:
- `Edit_Mode()`: Switch to data editing mode
- `Main_Mode()`: Return to read-only mode
- `Clear_Table()`: Exit current data view
**User Interface**:
- `Message_Box()`: Standard dialog messages
- `Yes_No_Cancel()`: Three-button confirmation dialogs
- `Response_Is_Yes()`: Binary confirmation dialogs
- `Sound_Off()`: Audio feedback system
**System Configuration**:
- `Get_Custom_Setup_Variables()`: Load company settings
- `Customize_Setup()`: Edit letterhead and branding
- Printer configuration and management
- Default printer selection
**Reporting Infrastructure**:
- `Print_Report()`: Universal report output handler
- Destination selection (printer/screen/file)
- Escape sequence processing for printer codes
- File output with overwrite protection
**Data Management**:
- `Organize()`: Archive file management (ARCHIVE status handling)
- `Update_Calendar()`: Appointment archival system
- Printer status checking and error handling
## Business Domain Analysis
### Legal Practice Specialization
**Target Market**: Family law attorneys specializing in divorce cases involving retirement benefit division
**Core Competencies**:
1. **QDRO Expertise**: Qualified Domestic Relations Orders for pension division
2. **Complex Asset Division**: Retirement plan valuation and division
3. **Time and Billing**: Specialized legal billing requirements
4. **Document Generation**: Automated legal document preparation
### Workflow Integration
**Typical Case Flow**:
1. **Client Intake**: Rolodex entry with contact information
2. **File Opening**: Create case file with billing parameters
3. **Time Tracking**: Record attorney/staff time with built-in timer
4. **Document Preparation**: Select and generate legal documents
5. **QDRO Drafting**: Specialized pension division order preparation
6. **Billing**: Generate statements and track payments
7. **Case Closure**: Automatic account reconciliation
### Advanced Features
**Financial Management**:
- Trust account IOLTA compliance tracking
- Separate billed/unbilled amount tracking
- Automatic transferable amount calculations
- Statement generation with customizable footers
**Document Automation**:
- Template library with keyword indexing
- Automated data merge with WordPerfect
- Version control for document revisions
- Batch document generation capabilities
## Technical Architecture Patterns
### Event-Driven Programming
- Standardized `WAIT WORKSPACE` event loops
- Consistent key binding patterns (F2=Save, F8=Clear, F9=Edit)
- Menu-driven interactions with hierarchical structure
- Trigger-based field and record validation
### Data Validation Framework
- Field-level validation on departure
- Record-level validation before posting
- Business rule enforcement (required fields, calculations)
- Automatic default value assignment
### Master-Detail Relationships
- Files ↔ Ledger (1:many) with automatic totaling
- Rolodex ↔ Phone (1:many) with contact integration
- Files ↔ Rolodex (many:1) for client/opposing party references
- Form ↔ Index (1:many) for document categorization
### Search and Query Patterns
- Dynamic query building with user criteria
- Complex multi-table joins for reporting
- Pattern matching with wildcard support
- Result set management with subset tables
### User Experience Design
- Context-sensitive help and prompts
- Consistent speedbar layouts
- Progressive disclosure in menu systems
- Modal dialog standardization
## Integration Points
### External System Integration
- **WordPerfect**: Document merge and generation
- **DOS**: Batch file execution for complex operations
- **Printer Systems**: Direct escape sequence management
- **File System**: Template and configuration file management
### Data Export/Import
- Configuration file generation for merges
- Report output to multiple formats (printer/screen/file)
- Automatic data extraction for document assembly
- Archive and reorganization utilities
## Performance and Scalability Considerations
### Database Design
- Indexed access patterns for fast lookups
- Efficient master-detail navigation
- Query optimization with selective criteria
- Memory management with dynamic arrays
### User Interface Responsiveness
- Immediate feedback for user actions
- Progress indicators for long operations
- Background processing for reports
- Efficient screen updates with ECHO control
## Security and Data Integrity
### Access Control
- User-based table access (seen in QDRO user checking)
- Function-level security through menu disabling
- Data validation to prevent corruption
### Backup and Recovery
- Archive table management for historical data
- Automatic data reorganization utilities
- Printer and configuration backup systems
## Conclusion
The legacy system represents a sophisticated, domain-specific application with deep understanding of legal practice management needs. Key strengths include:
1. **Specialized Legal Functionality**: QDRO expertise and legal document automation
2. **Integrated Financial Management**: Complex billing with trust account compliance
3. **Advanced User Interface**: Consistent, efficient data entry patterns
4. **Document Assembly Integration**: Sophisticated template and merge system
5. **Comprehensive Reporting**: Multiple output formats with printer management
The system demonstrates mature software architecture principles adapted to the constraints and capabilities of the Paradox platform, with particular excellence in the specialized domain of family law practice management.

View File

@@ -0,0 +1,462 @@
# Missing Features TODO List
## Overview
Based on the comprehensive analysis of the legacy Paradox system, this document outlines all features present in the original system but missing from the current FastAPI implementation. Items are prioritized by business impact and implementation complexity.
---
## HIGH PRIORITY - Core Business Features
### 🔴 1. Document Assembly System
**Legacy Feature**: Sophisticated legal document generation with WordPerfect integration
**Current Status**: ❌ Not implemented
**Required Components**:
#### 1.1 Document Template Management
- [ ] Create `DocumentTemplate` model
- Template name, description, file path
- Category/area of law classification
- Status (active/inactive)
- Version tracking
- [ ] Template upload and storage system
- [ ] Template preview capabilities
- [ ] Template search and filtering
#### 1.2 Template Keyword/Index System
- [ ] Create `TemplateKeyword` model for searchable tags
- [ ] Keyword management interface
- [ ] Advanced template search by keywords
- [ ] Template categorization system
#### 1.3 Data Merge Engine
- [ ] Variable extraction from current data context
- [ ] Merge field mapping system
- [ ] Template variable substitution engine
- [ ] Support for multiple output formats (PDF, DOCX)
#### 1.4 Form Selection Interface
- [ ] Multi-template selection UI
- [ ] Template preview and description display
- [ ] Batch document generation
- [ ] Generated document management
**API Endpoints Needed**:
```
POST /api/templates/upload
GET /api/templates/search
POST /api/templates/{id}/merge
GET /api/templates/{id}/preview
POST /api/documents/generate-batch
```
### 🔴 2. QDRO (Pension Division) Module
**Legacy Feature**: Specialized module for Qualified Domestic Relations Orders
**Current Status**: ❌ Not implemented
**Required Components**:
#### 2.1 QDRO Data Model
- [ ] Create `QDRO` model
- File number reference
- Version tracking
- Plan information (name, type, administrator)
- Participant details (employee, spouse/ex-spouse)
- Division methodology (percentage, dollar amount, etc.)
- Effective dates and conditions
- [ ] Plan information database
- [ ] QDRO version management
#### 2.2 QDRO-Specific Forms
- [ ] QDRO data entry interface
- [ ] Plan information management
- [ ] Participant role management
- [ ] Division calculation tools
#### 2.3 QDRO Document Generation
- [ ] QDRO-specific templates
- [ ] Integration with document assembly system
- [ ] Version control for QDRO revisions
- [ ] Court approval tracking
**API Endpoints Needed**:
```
POST /api/qdros
GET /api/qdros/{file_no}
PUT /api/qdros/{id}
POST /api/qdros/{id}/generate-document
GET /api/qdros/{id}/versions
POST /api/plan-info
```
### 🔴 3. Advanced Billing & Statement Generation
**Legacy Feature**: Automated billing statement generation with trust account management
**Current Status**: ⚠️ Partially implemented (basic transactions exist)
**Missing Components**:
#### 3.1 Statement Generation Engine
- [ ] Create billing statement templates
- [ ] Automated statement generation by file/client
- [ ] Customizable statement footers by file status
- [ ] Statement preview and approval workflow
- [ ] Batch statement generation
#### 3.2 Enhanced Trust Account Management
- [ ] Extend trust account transaction types
- [ ] Trust account balance tracking per file
- [ ] IOLTA compliance reporting
- [ ] Trust-to-fee transfer automation
- [ ] Trust account reconciliation tools
#### 3.3 Billing Workflow Management
- [ ] Billed/unbilled transaction status tracking
- [ ] Bulk billing status updates
- [ ] Statement approval and locking
- [ ] Payment application workflow
- [ ] Account aging reports
#### 3.4 Advanced Financial Reports
- [ ] Account balance summaries by employee
- [ ] Account aging reports
- [ ] Trust account activity reports
- [ ] Revenue reports by area of law
- [ ] Time utilization reports
**API Endpoints Needed**:
```
POST /api/billing/statements/generate
GET /api/billing/statements/{file_no}
POST /api/billing/statements/batch
PUT /api/financial/transactions/bulk-bill
GET /api/reports/trust-account
GET /api/reports/account-aging
```
---
## MEDIUM PRIORITY - Productivity Features
### 🟡 4. Integrated Time Tracking
**Legacy Feature**: Built-in timer with start/stop functionality
**Current Status**: ❌ Not implemented
**Required Components**:
#### 4.1 Timer System
- [ ] Real-time timer with start/stop/pause
- [ ] Timer state persistence across sessions
- [ ] Multiple concurrent timers by file/task
- [ ] Timer integration with transaction entry
#### 4.2 Time Entry Automation
- [ ] Auto-populate time entries from timer
- [ ] Default rate assignment by employee
- [ ] Automatic quantity calculation
- [ ] Timer history and reporting
**Frontend Components**:
```
TimerWidget
├── Timer display (HH:MM:SS)
├── Start/Stop/Pause controls
├── File/task selection
└── Quick time entry creation
```
**API Endpoints Needed**:
```
POST /api/timers/start
POST /api/timers/{id}/stop
GET /api/timers/active
POST /api/timers/{id}/create-entry
```
### 🟡 5. Enhanced File Management
**Legacy Feature**: Advanced file operations and status management
**Current Status**: ⚠️ Basic file management exists
**Missing Components**:
#### 5.1 File Closure Automation
- [ ] Automatic file closure workflow
- [ ] Outstanding balance payment entry creation
- [ ] File closure validation and confirmations
- [ ] File reopening capabilities
#### 5.2 File Status Workflow
- [ ] Enhanced file status definitions
- [ ] Status-based business rule enforcement
- [ ] Automatic status transitions
- [ ] Status history tracking
#### 5.3 File Organization
- [ ] Archive file management
- [ ] Bulk file status updates
- [ ] File transfer between attorneys
- [ ] File merge capabilities
**API Endpoints Needed**:
```
POST /api/files/{id}/close
POST /api/files/{id}/reopen
POST /api/files/bulk-status-update
POST /api/files/{id}/transfer
```
### 🟡 6. Advanced Printer Management
**Legacy Feature**: Sophisticated printer configuration and report formatting
**Current Status**: ❌ Not implemented
**Required Components**:
#### 6.1 Printer Configuration
- [ ] Printer setup database
- [ ] Escape sequence management
- [ ] Default printer selection
- [ ] Report-specific printer settings
#### 6.2 Report Output Management
- [ ] Multiple output destinations (printer/screen/file)
- [ ] Print preview functionality
- [ ] Batch printing capabilities
- [ ] Print queue management
**Note**: Modern web applications typically rely on browser printing, but for a legal office, direct printer control might still be valuable.
---
## LOW PRIORITY - Nice-to-Have Features
### 🟢 7. Calendar/Appointment System
**Legacy Feature**: Calendar management with appointment archival
**Current Status**: ❌ Not implemented
**Required Components**:
#### 7.1 Calendar Management
- [ ] Create `Calendar` and `Appointment` models
- [ ] Calendar views (day/week/month)
- [ ] Appointment scheduling and management
- [ ] Conflict detection and resolution
#### 7.2 Integration Features
- [ ] File-related appointment linking
- [ ] Court date tracking
- [ ] Deadline management
- [ ] Automatic archival of old appointments
### 🟢 8. Enhanced Lookup Table Management
**Legacy Feature**: Comprehensive lookup table administration
**Current Status**: ⚠️ Basic lookup management exists
**Missing Components**:
#### 8.1 Advanced Lookup Management
- [ ] Dynamic lookup table creation
- [ ] Lookup table relationships
- [ ] Import/export of lookup data
- [ ] Lookup table versioning
#### 8.2 Business Rule Configuration
- [ ] Configurable validation rules
- [ ] Dynamic field requirements
- [ ] Custom calculation formulas
- [ ] Workflow automation rules
### 🟢 9. Enhanced Search Capabilities
**Legacy Feature**: Complex multi-criteria search dialogs
**Current Status**: ✅ Good search exists, but could be enhanced
**Potential Improvements**:
#### 9.1 Advanced Search Features
- [ ] Saved search templates
- [ ] Complex boolean search operators
- [ ] Search history and favorites
- [ ] Export search results to various formats
#### 9.2 Search Performance
- [ ] Search result caching
- [ ] Async search for large datasets
- [ ] Search analytics and optimization
- [ ] Full-text search across documents
---
## SYSTEM ARCHITECTURE IMPROVEMENTS
### 🔵 10. Workflow Engine
**Legacy Feature**: Implicit workflow through business rules
**Current Status**: ❌ Not implemented
**Potential Implementation**:
#### 10.1 Workflow Definition
- [ ] Create workflow definition system
- [ ] State machine implementation
- [ ] Conditional workflow paths
- [ ] Workflow versioning
#### 10.2 Process Automation
- [ ] Automatic task assignment
- [ ] Email notifications and reminders
- [ ] Document generation triggers
- [ ] Status update automation
### 🔵 11. Audit and Compliance
**Legacy Feature**: Basic data integrity checks
**Current Status**: ⚠️ Basic audit logging exists
**Enhanced Features**:
#### 11.1 Comprehensive Audit Trail
- [ ] Detailed change tracking
- [ ] User action logging
- [ ] Data access monitoring
- [ ] Compliance reporting
#### 11.2 Legal Compliance
- [ ] Client confidentiality controls
- [ ] Data retention policies
- [ ] Bar association compliance reporting
- [ ] Trust account regulatory compliance
### 🔵 12. Integration Platform
**Legacy Feature**: WordPerfect and DOS integration
**Current Status**: ❌ No external integrations
**Modern Integrations**:
#### 12.1 Office Suite Integration
- [ ] Microsoft Office integration
- [ ] Google Workspace integration
- [ ] PDF generation and manipulation
- [ ] Email integration (Outlook/Gmail)
#### 12.2 Legal Software Integration
- [ ] Court filing systems
- [ ] Legal research platforms (Westlaw/Lexis)
- [ ] Accounting software integration
- [ ] Case management platforms
---
## IMPLEMENTATION ROADMAP
### Phase 1: Core Business Features (3-6 months)
1. Document Assembly System
2. QDRO Module
3. Advanced Billing & Statement Generation
### Phase 2: Productivity Features (2-4 months)
1. Integrated Time Tracking
2. Enhanced File Management
3. Advanced Printer Management
### Phase 3: System Enhancements (2-3 months)
1. Calendar/Appointment System
2. Enhanced Lookup Management
3. Workflow Engine
### Phase 4: Integration and Compliance (1-2 months)
1. Audit and Compliance
2. Integration Platform
3. Performance Optimization
---
## TECHNICAL CONSIDERATIONS
### Database Schema Changes Required
- New tables: `document_templates`, `template_keywords`, `qdros`, `plan_info`, `timers`, `workflows`
- Extended tables: Enhanced `financial_transactions`, `files`, `customers`
- Audit tables: Comprehensive change tracking
### API Architecture Extensions
- Document generation microservice
- Timer/time tracking service
- Workflow engine service
- Notification service
### Frontend Architecture
- Document preview components
- Timer widgets
- Advanced form builders
- Report generation interfaces
### Infrastructure Requirements
- Document storage (S3/MinIO)
- Background job processing (Celery/RQ)
- Real-time updates (WebSocket)
- Print server integration (if required)
---
## SUCCESS METRICS
### Business Impact
- [ ] Reduction in document preparation time
- [ ] Improved billing accuracy and speed
- [ ] Enhanced client service delivery
- [ ] Compliance with legal practice standards
### Technical Performance
- [ ] Document generation time < 5 seconds
- [ ] Search response time < 500ms
- [ ] 99.9% system uptime
- [ ] Zero data loss incidents
### User Adoption
- [ ] 100% user migration from legacy system
- [ ] Reduced training time for new users
- [ ] Positive user satisfaction scores
- [ ] Increased productivity metrics
---
## ESTIMATED EFFORT
### Development Time (Person-Months)
- **High Priority Features**: 8-12 months
- **Medium Priority Features**: 4-6 months
- **Low Priority Features**: 3-4 months
- **System Architecture**: 2-3 months
### **Total Estimated Effort**: 17-25 person-months
### Team Recommendations
- 1 Full-stack Developer (lead)
- 1 Backend Developer (APIs/database)
- 1 Frontend Developer (UI/UX)
- 1 DevOps Engineer (part-time)
- 1 Legal Domain Expert (consultant)
---
*This TODO list should be regularly updated as features are implemented and new requirements are discovered.*

View File

@@ -230,6 +230,101 @@ Allowed sort fields (high level):
- `PUT /api/documents/qdros/{file_no}/{id}` - Update QDRO - `PUT /api/documents/qdros/{file_no}/{id}` - Update QDRO
- `DELETE /api/documents/qdros/{file_no}/{id}` - Delete QDRO - `DELETE /api/documents/qdros/{file_no}/{id}` - Delete QDRO
📚 See also: [PENSIONS.md](PENSIONS.md) for detailed pensions API fields, sorting, and examples.
### Pensions
- `GET /api/pensions/schedules` - List pension schedules for a file
- Query params: `file_no` (required), `skip`, `limit`, `sort_by` (id,file_no,version,vests_on,vests_at), `sort_dir`, `include_total`,
filters: `start`, `end`, `version`, numeric ranges: `vests_at_min`, `vests_at_max`, search: `search` (tokenized across `version`, `frequency`).
- Examples:
```bash
curl "http://localhost:6920/api/pensions/schedules?file_no=F-1&sort_by=vests_on&sort_dir=asc&limit=20&include_total=true"
curl "http://localhost:6920/api/pensions/schedules?file_no=F-1&version=02&vests_at_min=10&vests_at_max=50"
```
- `GET /api/pensions/marriages` - List marriage history for a file
- Query params: `file_no` (required), `skip`, `limit`, `sort_by` (id,file_no,version,married_from,married_to,marital_percent,service_from,service_to), `sort_dir`, `include_total`,
filters: `start`, `end`, `version`, numeric ranges: `married_years_min/_max`, `service_years_min/_max`, `marital_percent_min/_max`, search: `search` (tokenized across `version`, `spouse_name`, `notes`).
- Example:
```bash
curl "http://localhost:6920/api/pensions/marriages?file_no=F-1&search=Jane%20Doe&sort_by=married_from&sort_dir=desc"
```
- `GET /api/pensions/death-benefits` - List death benefits for a file
- Query params: `file_no` (required), `skip`, `limit`, `sort_by` (id,file_no,version,lump1,lump2,growth1,growth2,disc1,disc2,created), `sort_dir`, `include_total`,
filters: `start`, `end`, `version`, numeric ranges: `lump1_min/_max`, `lump2_min/_max`, `growth1_min/_max`, `growth2_min/_max`, `disc1_min/_max`, `disc2_min/_max`, search: `search` (tokenized across `version`, `beneficiary_name`, `benefit_type`, `notes`).
- Example:
```bash
curl "http://localhost:6920/api/pensions/death-benefits?file_no=F-1&lump1_min=100&sort_by=lump1&sort_dir=desc"
```
- `GET /api/pensions/separations` - List separation agreements for a file
- Query params: `file_no` (required), `skip`, `limit`, `sort_by` (id,file_no,version,agreement_date), `sort_dir`, `include_total`,
filters: `start`, `end`, `version`, search: `search` (tokenized across `version`, `terms`, `notes`).
- Example:
```bash
curl "http://localhost:6920/api/pensions/separations?file_no=F-1&start=2024-01-01&end=2024-12-31&sort_by=agreement_date"
```
- `GET /api/pensions/{file_no}/detail` - Detail view for a file's pension data with nested, independently paginated lists
- Each nested list accepts its own paging/sorting/filtering query prefixes: `s_*` (schedules), `m_*` (marriages), `d_*` (death benefits), `sep_*` (separations)
- Example:
```bash
curl "http://localhost:6920/api/pensions/F-1/detail?s_sort_by=vests_on&s_limit=10&m_sort_by=married_from&d_sort_by=lump1&sep_sort_by=agreement_date"
```
- `POST /api/pensions/` - Create a main Pension record
- Body (JSON, selected fields):
```json
{
"file_no": "F-1",
"version": "01",
"plan_id": "PID1",
"plan_name": "Plan A",
"vested_per": 50,
"tax_rate": 25
}
```
  - Notes: numeric validation enforced (e.g., `vested_per` 0–100; `tax_rate` 0–100; monetary values non-negative)
- `GET /api/pensions/{pension_id}` - Get a Pension by id
- `PUT /api/pensions/{pension_id}` - Update a Pension (partial fields accepted)
- Example:
```bash
curl -X PUT "http://localhost:6920/api/pensions/123" -H 'Content-Type: application/json' -d '{"plan_name":"Plan B","vested_per":75}'
```
- `DELETE /api/pensions/{pension_id}` - Delete a Pension
### Templates
- `GET /api/templates/search` - Search document templates
- Query params:
- `q` (str, optional): partial match on template name or description
- `category` (str[] or CSV, optional): filter by one or more categories. Repeat the parameter (`?category=A&category=B`) or pass a comma-separated list (`?category=A,B`).
- `keywords` (str[], optional, repeatable): keyword tags assigned to templates
- `keywords_mode` (str, optional): `any` (default) returns templates that match any of the provided keywords; `all` returns only templates that contain all the provided keywords
- `has_keywords` (bool, optional): when `true`, only templates that have one or more keywords are returned; when `false`, only templates with no keywords are returned
- `skip` (int, optional): pagination offset, default 0
- `limit` (int, optional): page size, default 50, max 200
- `sort_by` (str, optional): `name` (default) | `category` | `updated`
- `sort_dir` (str, optional): `asc` (default) | `desc`
- `active_only` (bool, optional): when `true` (default), only active templates are returned
- Examples:
```bash
# Any of the keywords (default)
curl "http://localhost:6920/api/templates/search?keywords=qdro&keywords=divorce"
# Must contain all keywords
curl "http://localhost:6920/api/templates/search?keywords=qdro&keywords=divorce&keywords_mode=all"
# Sorted by name descending with pagination
curl "http://localhost:6920/api/templates/search?sort_by=name&sort_dir=desc&skip=10&limit=10"
# Include inactive templates
curl "http://localhost:6920/api/templates/search?active_only=false"
```
- `GET /api/templates/categories` - List distinct template categories with counts
- Query params:
- `active_only` (bool, optional): when `true` (default), only counts active templates
- Example:
```bash
curl "http://localhost:6920/api/templates/categories?active_only=false"
```
### Support ### Support
- `POST /api/support/tickets` - Create support ticket (public; auth optional) - `POST /api/support/tickets` - Create support ticket (public; auth optional)
- `GET /api/support/tickets` - List tickets (admin; supports filters, search, pagination, sorting, `include_total`) - `GET /api/support/tickets` - List tickets (admin; supports filters, search, pagination, sorting, `include_total`)

View File

@@ -0,0 +1,95 @@
import io
from datetime import date
from fastapi.testclient import TestClient
from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def test_crud_and_list_filters_for_pensions_tables():
    """End-to-end CRUD plus list filtering for the four pension sub-tables.

    Exercises schedules, marriages, death benefits, and separation
    agreements: create -> list/filter -> update -> delete for each.
    """
    # Bypass real authentication with a fake admin user.
    app.dependency_overrides[get_current_user] = lambda: {
        "id": 1,
        "username": "tester",
        "is_admin": True,
        "is_active": True,
    }
    # try/finally so a failing assertion cannot leak the auth override
    # into other tests in the same session.
    try:
        client = TestClient(app)
        # Seed base data via import
        rolodex_csv = "Id,Last\nR2,Beta\n"
        files_csv = "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\nF-2,R2,CIVIL,Test,2024-01-01,E01,ACTIVE,100\n"
        client.post("/api/import/batch-upload", files=[
            _csv_file("ROLODEX.csv", rolodex_csv),
            _csv_file("FILES.csv", files_csv),
        ])
        # Create schedule row
        resp = client.post("/api/pensions/schedules", json={"file_no": "F-2", "vests_on": "2024-06-01", "vests_at": 50})
        assert resp.status_code == 201
        sched_id = resp.json()["id"]
        # Filter by date range (hit)
        rlist = client.get("/api/pensions/schedules", params={"file_no": "F-2", "start": "2024-01-01", "end": "2024-12-31"})
        assert rlist.status_code == 200 and len(rlist.json()) >= 1
        # Update
        up = client.put(f"/api/pensions/schedules/{sched_id}", json={"vests_at": 75})
        assert up.status_code == 200 and up.json()["vests_at"] == 75
        # Create marriage history
        resp = client.post("/api/pensions/marriages", json={
            "file_no": "F-2",
            "married_from": "2000-01-01",
            "married_to": "2010-01-01",
            "married_years": 10,
            "service_from": "1998-01-01",
            "service_to": "2010-01-01",
            "service_years": 12,
            "marital_percent": 40,
        })
        assert resp.status_code == 201
        marr_id = resp.json()["id"]
        # Filter by married_from date range
        rlist = client.get("/api/pensions/marriages", params={"file_no": "F-2", "start": "1999-01-01", "end": "2001-12-31"})
        assert rlist.status_code == 200 and len(rlist.json()) >= 1
        # Update
        up = client.put(f"/api/pensions/marriages/{marr_id}", json={"marital_percent": 50})
        assert up.status_code == 200 and up.json()["marital_percent"] == 50
        # Create death benefit
        resp = client.post("/api/pensions/death-benefits", json={"file_no": "F-2", "lump1": 1000})
        assert resp.status_code == 201
        death_id = resp.json()["id"]
        # List by file_no (row was created just above)
        rlist = client.get("/api/pensions/death-benefits", params={"file_no": "F-2"})
        assert rlist.status_code == 200 and any(row.get("id") == death_id for row in rlist.json())
        # Update
        up = client.put(f"/api/pensions/death-benefits/{death_id}", json={"lump2": 500})
        assert up.status_code == 200 and up.json()["lump2"] == 500
        # Create separation agreement
        resp = client.post("/api/pensions/separations", json={"file_no": "F-2", "agreement_date": "2024-02-01", "terms": "Terms"})
        assert resp.status_code == 201
        sep_id = resp.json()["id"]
        # Filter by agreement_date range
        rlist = client.get("/api/pensions/separations", params={"file_no": "F-2", "start": "2024-01-01", "end": "2024-12-31"})
        assert rlist.status_code == 200 and len(rlist.json()) >= 1
        # Update
        up = client.put(f"/api/pensions/separations/{sep_id}", json={"terms": "Updated"})
        assert up.status_code == 200 and up.json()["terms"] == "Updated"
        # Delete paths
        assert client.delete(f"/api/pensions/schedules/{sched_id}").status_code == 204
        assert client.delete(f"/api/pensions/marriages/{marr_id}").status_code == 204
        assert client.delete(f"/api/pensions/death-benefits/{death_id}").status_code == 204
        assert client.delete(f"/api/pensions/separations/{sep_id}").status_code == 204
    finally:
        # Always restore real auth, even when an assertion above failed.
        app.dependency_overrides.pop(get_current_user, None)

View File

@@ -0,0 +1,84 @@
import io
from datetime import date
import uuid
from fastapi.testclient import TestClient
from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def _seed_file(client: TestClient, file_no: str, owner_id: "str | None" = None) -> None:
    """Seed one ROLODEX owner and one FILES row through the batch-import API.

    Args:
        client: Test client bound to the app under test.
        file_no: File number to create.
        owner_id: Optional rolodex id; a random one is generated when omitted.
          (Annotation fixed: the old ``str`` annotation contradicted the
          ``None`` default.)
    """
    # Random owner id avoids collisions between tests sharing the database.
    owner_id = owner_id or f"R{uuid.uuid4().hex[:6]}"
    rolodex_csv = f"Id,Last\n{owner_id},Alpha\n"
    files_csv = (
        "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\n"
        f"{file_no},{owner_id},CIVIL,Test,{date.today():%Y-%m-%d},E01,ACTIVE,100\n"
    )
    client.post("/api/import/batch-upload", files=[
        _csv_file("ROLODEX.csv", rolodex_csv),
        _csv_file("FILES.csv", files_csv),
    ])
def _auth():
    """Install a fake admin user so auth-protected endpoints succeed."""
    def _fake_user():
        return {"id": 1, "username": "tester", "is_admin": True, "is_active": True}
    app.dependency_overrides[get_current_user] = _fake_user
def test_pension_crud_and_validation():
    """CRUD lifecycle for a main Pension record plus payload validation.

    Verifies create/get/update/delete and that out-of-range percentages and
    negative monetary values are rejected with 422.
    """
    _auth()
    # try/finally so a failing assertion cannot leak the auth override
    # into other tests in the same session.
    try:
        client = TestClient(app)
        file_no = f"PF-CRUD-{uuid.uuid4().hex[:8]}"
        _seed_file(client, file_no)
        # Create
        create_payload = {
            "file_no": file_no,
            "version": "01",
            "plan_id": "PID1",
            "plan_name": "Plan A",
            "vested_per": 50,
            "tax_rate": 25,
        }
        r = client.post("/api/pensions/", json=create_payload)
        assert r.status_code == 201
        pid = r.json()["id"]
        # Get
        rg = client.get(f"/api/pensions/{pid}")
        assert rg.status_code == 200 and rg.json()["plan_name"] == "Plan A"
        # Partial update
        ru = client.put(f"/api/pensions/{pid}", json={"plan_name": "Plan B", "vested_per": 75})
        assert ru.status_code == 200 and ru.json()["plan_name"] == "Plan B" and ru.json()["vested_per"] == 75
        # Validation edges: percentages outside 0-100 and negative money must fail.
        bads = [
            {"vested_per": -1},
            {"vested_per": 101},
            {"tax_rate": -5},
            {"tax_rate": 150},
            {"valu": -0.01},
        ]
        for payload in bads:
            rv = client.put(f"/api/pensions/{pid}", json=payload)
            assert rv.status_code == 422
        # Delete, then confirm it is gone
        rd = client.delete(f"/api/pensions/{pid}")
        assert rd.status_code == 204
        r404 = client.get(f"/api/pensions/{pid}")
        assert r404.status_code == 404
    finally:
        # Always restore real auth, even when an assertion above failed.
        app.dependency_overrides.pop(get_current_user, None)

View File

@@ -0,0 +1,92 @@
import io
from datetime import date
import uuid
from fastapi.testclient import TestClient
from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def _seed_file(client: TestClient, file_no: str, owner_id: "str | None" = None) -> None:
    """Seed one ROLODEX owner and one FILES row through the batch-import API.

    Args:
        client: Test client bound to the app under test.
        file_no: File number to create.
        owner_id: Optional rolodex id; a random one is generated when omitted.
          (Annotation fixed: the old ``str`` annotation contradicted the
          ``None`` default.)
    """
    # Random owner id avoids collisions between tests sharing the database.
    owner_id = owner_id or f"R{uuid.uuid4().hex[:6]}"
    rolodex_csv = f"Id,Last\n{owner_id},Alpha\n"
    files_csv = (
        "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\n"
        f"{file_no},{owner_id},CIVIL,Test,{date.today():%Y-%m-%d},E01,ACTIVE,100\n"
    )
    client.post("/api/import/batch-upload", files=[
        _csv_file("ROLODEX.csv", rolodex_csv),
        _csv_file("FILES.csv", files_csv),
    ])
def _auth():
    """Install a fake admin user so auth-protected endpoints succeed."""
    def _fake_user():
        return {"id": 1, "username": "tester", "is_admin": True, "is_active": True}
    app.dependency_overrides[get_current_user] = _fake_user
def test_pension_detail_nested_collections_support_pagination_and_sorting():
    """The /detail endpoint's nested lists honor per-section query prefixes.

    Each section has its own paging/sorting prefix: ``s_*`` (schedules),
    ``m_*`` (marriages), ``d_*`` (death benefits), ``sep_*`` (separations).
    """
    _auth()
    # try/finally so a failing assertion cannot leak the auth override
    # into other tests in the same session.
    try:
        client = TestClient(app)
        file_no = f"PF-DET-{uuid.uuid4().hex[:8]}"
        _seed_file(client, file_no)
        # Seed related rows
        client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "01", "frequency": "Monthly", "vests_on": "2024-01-01", "vests_at": 10})
        client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "01", "frequency": "Monthly", "vests_on": "2024-02-01", "vests_at": 20})
        client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "01", "frequency": "Monthly", "vests_on": "2024-03-01", "vests_at": 30})
        client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "01", "married_from": "2001-01-01", "marital_percent": 10})
        client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "01", "married_from": "2002-01-01", "marital_percent": 20})
        client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "01", "lump1": 100})
        client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "01", "lump1": 200})
        client.post("/api/pensions/separations", json={"file_no": file_no, "version": "01", "agreement_date": "2024-02-01", "terms": "X"})
        client.post("/api/pensions/separations", json={"file_no": file_no, "version": "01", "agreement_date": "2024-03-01", "terms": "Y"})
        # Call detail with pagination and sorting per section
        resp = client.get(
            f"/api/pensions/{file_no}/detail",
            params={
                "s_sort_by": "vests_on", "s_sort_dir": "asc", "s_limit": 2, "s_skip": 1,
                "m_sort_by": "married_from", "m_sort_dir": "desc", "m_limit": 1,
                "d_sort_by": "lump1", "d_sort_dir": "desc",
                "sep_sort_by": "agreement_date", "sep_sort_dir": "asc",
            },
        )
        assert resp.status_code == 200
        body = resp.json()
        # Schedules: s_skip=1 drops January, so February leads the page.
        sched = body["schedules"]
        assert isinstance(sched["items"], list)
        assert sched["total"] >= 3
        sched_dates = [row["vests_on"] for row in sched["items"]]
        assert sched_dates[0] == "2024-02-01"
        # Marriages sorted desc by married_from
        marr = body["marriages"]
        assert [row["married_from"] for row in marr["items"]][0] == "2002-01-01"
        # Death benefits sorted desc by lump1
        deaths = body["death_benefits"]
        assert [row["lump1"] for row in deaths["items"]][:2] == [200, 100]
        # Separations sorted asc by agreement_date
        seps = body["separations"]
        assert [row["agreement_date"] for row in seps["items"]][:2] == ["2024-02-01", "2024-03-01"]
    finally:
        # Always restore real auth, even when an assertion above failed.
        app.dependency_overrides.pop(get_current_user, None)

View File

@@ -0,0 +1,158 @@
import io
from datetime import date
import uuid
from fastapi.testclient import TestClient
from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def _seed_file(client: TestClient, file_no: str, owner_id: "str | None" = None) -> None:
    """Seed one ROLODEX owner and one FILES row through the batch-import API.

    Args:
        client: Test client bound to the app under test.
        file_no: File number to create.
        owner_id: Optional rolodex id; a random one is generated when omitted.
          (Annotation fixed: the old ``str`` annotation contradicted the
          ``None`` default.)
    """
    # Random owner id avoids collisions between tests sharing the database.
    owner_id = owner_id or f"R{uuid.uuid4().hex[:6]}"
    rolodex_csv = f"Id,Last\n{owner_id},Alpha\n"
    files_csv = (
        "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\n"
        f"{file_no},{owner_id},CIVIL,Test,{date.today():%Y-%m-%d},E01,ACTIVE,100\n"
    )
    client.post("/api/import/batch-upload", files=[
        _csv_file("ROLODEX.csv", rolodex_csv),
        _csv_file("FILES.csv", files_csv),
    ])
def _auth():
    """Install a fake admin user so auth-protected endpoints succeed."""
    def _fake_user():
        return {"id": 1, "username": "tester", "is_admin": True, "is_active": True}
    app.dependency_overrides[get_current_user] = _fake_user
def test_schedule_filters_by_version_and_vests_at_range():
    """Schedules list supports the version filter and vests_at_min/_max range."""
    _auth()
    # try/finally so a failing assertion cannot leak the auth override
    # into other tests in the same session.
    try:
        client = TestClient(app)
        file_no = f"PF-SF-{uuid.uuid4().hex[:8]}"
        _seed_file(client, file_no)
        client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "01", "vests_on": "2024-01-01", "vests_at": 10})
        client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "02", "vests_on": "2024-02-01", "vests_at": 20})
        client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "02", "vests_on": "2024-03-01", "vests_at": 30})
        # Filter by version: only the two "02" rows
        r = client.get("/api/pensions/schedules", params={"file_no": file_no, "version": "02", "sort_by": "vests_on"})
        assert r.status_code == 200
        body = r.json()
        assert len(body) == 2
        assert {row["vests_at"] for row in body} == {20, 30}
        # Filter by vests_at range: only the middle row qualifies
        r = client.get("/api/pensions/schedules", params={"file_no": file_no, "vests_at_min": 15, "vests_at_max": 25})
        assert r.status_code == 200
        body = r.json()
        assert len(body) == 1 and body[0]["vests_at"] == 20
    finally:
        # Always restore real auth, even when an assertion above failed.
        app.dependency_overrides.pop(get_current_user, None)
def test_marriage_filters_by_version_and_numeric_ranges():
    """Marriage list supports the version filter and numeric range filters."""
    _auth()
    # try/finally so a failing assertion cannot leak the auth override
    # into other tests in the same session.
    try:
        client = TestClient(app)
        file_no = f"PF-MF-{uuid.uuid4().hex[:8]}"
        _seed_file(client, file_no)
        client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "01", "married_from": "2000-01-01", "married_years": 10, "service_years": 12, "marital_percent": 40})
        client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "02", "married_from": "2005-01-01", "married_years": 5, "service_years": 8, "marital_percent": 20})
        client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "02", "married_from": "2010-01-01", "married_years": 15, "service_years": 20, "marital_percent": 60})
        # Version filter: only the two "02" rows
        r = client.get("/api/pensions/marriages", params={"file_no": file_no, "version": "02", "sort_by": "married_from"})
        assert r.status_code == 200
        items = r.json()
        assert len(items) == 2
        assert {row["marital_percent"] for row in items} == {20, 60}
        # married_years range isolates the 10-year marriage
        r = client.get("/api/pensions/marriages", params={"file_no": file_no, "married_years_min": 6, "married_years_max": 12})
        assert r.status_code == 200
        items = r.json()
        assert len(items) == 1 and items[0]["married_years"] == 10
        # service_years and marital_percent ranges combined
        r = client.get("/api/pensions/marriages", params={
            "file_no": file_no,
            "service_years_min": 10,
            "service_years_max": 20,
            "marital_percent_min": 50,
            "marital_percent_max": 70,
        })
        assert r.status_code == 200
        items = r.json()
        assert len(items) == 1 and items[0]["marital_percent"] == 60
    finally:
        # Always restore real auth, even when an assertion above failed.
        app.dependency_overrides.pop(get_current_user, None)
def test_death_filters_by_version_and_numeric_ranges():
    """Death-benefit list supports the version filter and combined numeric ranges."""
    _auth()
    # try/finally so a failing assertion cannot leak the auth override
    # into other tests in the same session.
    try:
        client = TestClient(app)
        file_no = f"PF-DF-{uuid.uuid4().hex[:8]}"
        _seed_file(client, file_no)
        client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "01", "lump1": 100, "lump2": 5, "growth1": 1, "growth2": 2, "disc1": 0.5, "disc2": 0.2})
        client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "02", "lump1": 300, "lump2": 7, "growth1": 3, "growth2": 4, "disc1": 0.7, "disc2": 0.3})
        client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "02", "lump1": 200, "lump2": 6, "growth1": 2, "growth2": 3, "disc1": 0.6, "disc2": 0.25})
        # Version filter with ascending sort by lump1
        r = client.get("/api/pensions/death-benefits", params={"file_no": file_no, "version": "02", "sort_by": "lump1", "sort_dir": "asc"})
        assert r.status_code == 200
        items = r.json()
        assert [row["lump1"] for row in items] == [200, 300]
        # Combined numeric ranges should isolate the middle row (lump1 == 200)
        r = client.get("/api/pensions/death-benefits", params={
            "file_no": file_no,
            "lump1_min": 150,
            "lump1_max": 250,
            "growth1_min": 1.5,
            "growth1_max": 2.5,
            "disc1_min": 0.55,
            "disc1_max": 0.65,
        })
        assert r.status_code == 200
        items = r.json()
        assert len(items) == 1 and items[0]["lump1"] == 200
    finally:
        # Always restore real auth, even when an assertion above failed.
        app.dependency_overrides.pop(get_current_user, None)
def test_separations_filters_by_version_and_date_range():
    """Separation listing supports a version filter and an agreement-date range."""
    _auth()
    client = TestClient(app)
    file_no = f"PF-SepF-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/separations", json={"file_no": file_no, "version": "01", "agreement_date": "2024-01-01", "terms": "t1"})
    client.post("/api/pensions/separations", json={"file_no": file_no, "version": "02", "agreement_date": "2024-02-01", "terms": "t2"})
    client.post("/api/pensions/separations", json={"file_no": file_no, "version": "02", "agreement_date": "2024-03-01", "terms": "t3"})
    # Version filter
    r = client.get("/api/pensions/separations", params={"file_no": file_no, "version": "02", "sort_by": "agreement_date", "sort_dir": "asc"})
    assert r.status_code == 200
    dates = [row.get("agreement_date") for row in r.json()]
    assert dates == ["2024-02-01", "2024-03-01"]
    # Date range: only the February row falls inside [start, end].
    r = client.get("/api/pensions/separations", params={"file_no": file_no, "start": "2024-01-15", "end": "2024-02-15"})
    assert r.status_code == 200
    dates = [row.get("agreement_date") for row in r.json()]
    assert dates == ["2024-02-01"]
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)

View File

@@ -0,0 +1,76 @@
import io
from fastapi.testclient import TestClient
from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def test_batch_import_includes_pension_aux_files_and_read_endpoints():
    """Batch import accepts the pension auxiliary CSVs and their read endpoints serve rows."""
    # Auth override
    app.dependency_overrides[get_current_user] = lambda: {
        "id": 1,
        "username": "tester",
        "is_admin": True,
        "is_active": True,
    }
    client = TestClient(app)
    # Minimal seed for dependent data
    rolodex_csv = "Id,Last\nR1,Alpha\n"
    files_csv = "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\nF-1,R1,CIVIL,Test,2024-01-01,E01,ACTIVE,100\n"
    schedule_csv = "File_No,Version,Vests_On,Vests_At\nF-1,01,2024-01-01,100\n"
    marriage_csv = (
        "File_No,Version,Married_From,Married_To,Married_Years,Service_From,Service_To,Service_Years,Marital_%\n"
        "F-1,01,2000-01-01,2010-01-01,10,1995-01-01,2010-01-01,15,50\n"
    )
    death_csv = "File_No,Version,Lump1,Lump2,Growth1,Growth2,Disc1,Disc2\nF-1,01,1000,0,0,0,0,0\n"
    separate_csv = "File_No,Version,Separation_Rate\nF-1,01,Terms\n"
    payload = [
        _csv_file("ROLODEX.csv", rolodex_csv),
        _csv_file("FILES.csv", files_csv),
        _csv_file("SCHEDULE.csv", schedule_csv),
        _csv_file("MARRIAGE.csv", marriage_csv),
        _csv_file("DEATH.csv", death_csv),
        _csv_file("SEPARATE.csv", separate_csv),
    ]
    # Batch upload
    resp = client.post("/api/import/batch-upload", files=payload)
    assert resp.status_code == 200
    body = resp.json()
    results = body.get("batch_results", [])
    # Ensure each target file is reported as processed with at least one row
    by_name = {r.get("file_type"): r for r in results}
    for name in ("SCHEDULE.csv", "MARRIAGE.csv", "DEATH.csv", "SEPARATE.csv"):
        assert name in by_name
        assert by_name[name].get("imported_count", 0) >= 1
    # Call read endpoints
    r1 = client.get("/api/pensions/schedules", params={"file_no": "F-1"})
    assert r1.status_code == 200
    assert isinstance(r1.json(), list)
    r2 = client.get("/api/pensions/marriages", params={"file_no": "F-1"})
    assert r2.status_code == 200
    assert isinstance(r2.json(), list)
    r3 = client.get("/api/pensions/death-benefits", params={"file_no": "F-1"})
    assert r3.status_code == 200
    assert isinstance(r3.json(), list)
    r4 = client.get("/api/pensions/separations", params={"file_no": "F-1"})
    assert r4.status_code == 200
    assert isinstance(r4.json(), list)
    # Cleanup override
    app.dependency_overrides.pop(get_current_user, None)

View File

@@ -0,0 +1,235 @@
import io
from datetime import date
import uuid
from fastapi.testclient import TestClient
from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def _seed_file(client: TestClient, file_no: str, owner_id: str = "RS") -> None:
    """Import a minimal ROLODEX + FILES pair so pension rows can reference *file_no*."""
    rolodex_csv = f"Id,Last\n{owner_id},Alpha\n"
    files_csv = (
        "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\n"
        f"{file_no},{owner_id},CIVIL,Test,{date.today():%Y-%m-%d},E01,ACTIVE,100\n"
    )
    uploads = [
        _csv_file("ROLODEX.csv", rolodex_csv),
        _csv_file("FILES.csv", files_csv),
    ]
    client.post("/api/import/batch-upload", files=uploads)
def test_pensions_schedules_pagination_and_sorting():
    """Schedule listing supports sort_by/sort_dir plus skip/limit and include_total."""
    app.dependency_overrides[get_current_user] = lambda: {
        "id": 1,
        "username": "tester",
        "is_admin": True,
        "is_active": True,
    }
    client = TestClient(app)
    file_no = f"PF-SCHED-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    # Create three schedules
    client.post("/api/pensions/schedules", json={"file_no": file_no, "vests_on": "2024-01-01", "vests_at": 10})
    client.post("/api/pensions/schedules", json={"file_no": file_no, "vests_on": "2024-02-01", "vests_at": 20})
    client.post("/api/pensions/schedules", json={"file_no": file_no, "vests_on": "2024-03-01", "vests_at": 30})
    # Sort by vests_on ascending
    r = client.get("/api/pensions/schedules", params={
        "file_no": file_no,
        "sort_by": "vests_on",
        "sort_dir": "asc",
    })
    assert r.status_code == 200
    dates = [row.get("vests_on") for row in r.json()]
    assert dates[:3] == ["2024-01-01", "2024-02-01", "2024-03-01"]
    # Sort by vests_on descending
    r = client.get("/api/pensions/schedules", params={
        "file_no": file_no,
        "sort_by": "vests_on",
        "sort_dir": "desc",
    })
    assert r.status_code == 200
    dates = [row.get("vests_on") for row in r.json()]
    assert dates[:3] == ["2024-03-01", "2024-02-01", "2024-01-01"]
    # Pagination with include_total: response becomes {"items": [...], "total": N}
    r = client.get("/api/pensions/schedules", params={
        "file_no": file_no,
        "sort_by": "vests_on",
        "sort_dir": "asc",
        "limit": 2,
        "skip": 0,
        "include_total": True,
    })
    assert r.status_code == 200
    body = r.json()
    assert isinstance(body.get("items"), list)
    assert body["total"] >= 3
    assert [row.get("vests_on") for row in body["items"]] == ["2024-01-01", "2024-02-01"]
    # Second page (no include_total): plain list response
    r = client.get("/api/pensions/schedules", params={
        "file_no": file_no,
        "sort_by": "vests_on",
        "sort_dir": "asc",
        "limit": 2,
        "skip": 2,
    })
    assert r.status_code == 200
    tail = r.json()
    assert len(tail) >= 1
    assert tail[0]["vests_on"] == "2024-03-01"
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)
def test_pensions_marriages_pagination_and_sorting():
    """Marriage listing supports sorting and skip/limit with include_total."""
    app.dependency_overrides[get_current_user] = lambda: {
        "id": 1,
        "username": "tester",
        "is_admin": True,
        "is_active": True,
    }
    client = TestClient(app)
    file_no = f"PF-MARR-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/marriages", json={
        "file_no": file_no,
        "married_from": "2000-01-01",
        "married_to": "2005-01-01",
        "marital_percent": 10,
    })
    client.post("/api/pensions/marriages", json={
        "file_no": file_no,
        "married_from": "2005-01-01",
        "married_to": "2010-01-01",
        "marital_percent": 20,
    })
    client.post("/api/pensions/marriages", json={
        "file_no": file_no,
        "married_from": "2010-01-01",
        "married_to": "2015-01-01",
        "marital_percent": 30,
    })
    # Sort by marital_percent desc
    r = client.get("/api/pensions/marriages", params={
        "file_no": file_no,
        "sort_by": "marital_percent",
        "sort_dir": "desc",
    })
    assert r.status_code == 200
    percents = [row.get("marital_percent") for row in r.json()]
    assert percents[:3] == [30, 20, 10]
    # Pagination: skip the first row; include_total wraps items with a count.
    r = client.get("/api/pensions/marriages", params={
        "file_no": file_no,
        "sort_by": "married_from",
        "sort_dir": "asc",
        "limit": 1,
        "skip": 1,
        "include_total": True,
    })
    assert r.status_code == 200
    body = r.json()
    assert body["total"] >= 3
    assert body["items"][0]["married_from"] == "2005-01-01"
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)
def test_pensions_death_benefits_pagination_and_sorting():
    """Death-benefit listing supports sorting by lump1 plus skip/limit pagination."""
    app.dependency_overrides[get_current_user] = lambda: {
        "id": 1,
        "username": "tester",
        "is_admin": True,
        "is_active": True,
    }
    client = TestClient(app)
    file_no = f"PF-DEATH-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/death-benefits", json={"file_no": file_no, "lump1": 100})
    client.post("/api/pensions/death-benefits", json={"file_no": file_no, "lump1": 300})
    client.post("/api/pensions/death-benefits", json={"file_no": file_no, "lump1": 200})
    # Sort by lump1 desc
    r = client.get("/api/pensions/death-benefits", params={
        "file_no": file_no,
        "sort_by": "lump1",
        "sort_dir": "desc",
    })
    assert r.status_code == 200
    l1s = [row.get("lump1") for row in r.json()]
    assert l1s[:3] == [300, 200, 100]
    # Pagination basic
    r = client.get("/api/pensions/death-benefits", params={
        "file_no": file_no,
        "sort_by": "lump1",
        "sort_dir": "asc",
        "limit": 2,
        "skip": 1,
    })
    assert r.status_code == 200
    page = r.json()
    assert [row.get("lump1") for row in page] == [200, 300]
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)
def test_pensions_separations_pagination_and_sorting():
    """Separation listing supports sorting by agreement_date and include_total pagination."""
    app.dependency_overrides[get_current_user] = lambda: {
        "id": 1,
        "username": "tester",
        "is_admin": True,
        "is_active": True,
    }
    client = TestClient(app)
    file_no = f"PF-SEP-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/separations", json={"file_no": file_no, "agreement_date": "2024-01-01", "terms": "t1"})
    client.post("/api/pensions/separations", json={"file_no": file_no, "agreement_date": "2024-02-01", "terms": "t2"})
    client.post("/api/pensions/separations", json={"file_no": file_no, "agreement_date": "2024-03-01", "terms": "t3"})
    # Sort by agreement_date desc
    r = client.get("/api/pensions/separations", params={
        "file_no": file_no,
        "sort_by": "agreement_date",
        "sort_dir": "desc",
    })
    assert r.status_code == 200
    dates = [row.get("agreement_date") for row in r.json()]
    assert dates[:3] == ["2024-03-01", "2024-02-01", "2024-01-01"]
    # Pagination: skip first two rows, include_total wraps items with a count.
    r = client.get("/api/pensions/separations", params={
        "file_no": file_no,
        "sort_by": "agreement_date",
        "sort_dir": "asc",
        "limit": 1,
        "skip": 2,
        "include_total": True,
    })
    assert r.status_code == 200
    body = r.json()
    assert body["total"] >= 3
    assert body["items"][0]["agreement_date"] == "2024-03-01"
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)

View File

@@ -0,0 +1,131 @@
import io
import uuid
from datetime import date
from typing import Optional

from fastapi.testclient import TestClient

from app.main import app
from app.auth.security import get_current_user
def _csv_file(name: str, text: str):
return ("files", (name, io.BytesIO(text.encode("utf-8")), "text/csv"))
def _seed_file(client: TestClient, file_no: str, owner_id: Optional[str] = None) -> None:
    """Import a minimal ROLODEX + FILES pair so pension rows can reference *file_no*.

    A fresh random owner id is generated when none is supplied, keeping each
    test's rolodex entry isolated from the others.

    Fix: the parameter was annotated ``str`` with a ``None`` default (implicit
    Optional, disallowed by PEP 484); annotate it ``Optional[str]`` instead.
    """
    owner_id = owner_id or f"R{uuid.uuid4().hex[:6]}"
    rolodex_csv = f"Id,Last\n{owner_id},Alpha\n"
    files_csv = (
        "File_No,Id,File_Type,Regarding,Opened,Empl_Num,Status,Rate_Per_Hour\n"
        f"{file_no},{owner_id},CIVIL,Test,{date.today():%Y-%m-%d},E01,ACTIVE,100\n"
    )
    client.post("/api/import/batch-upload", files=[
        _csv_file("ROLODEX.csv", rolodex_csv),
        _csv_file("FILES.csv", files_csv),
    ])
def _auth():
    """Install an auth override that acts as an active admin user."""
    app.dependency_overrides[get_current_user] = lambda: dict(
        id=1,
        username="tester",
        is_admin=True,
        is_active=True,
    )
def test_schedule_tokenized_search_version_and_frequency():
    """Schedule search requires every token to match across the allowed columns."""
    _auth()
    client = TestClient(app)
    file_no = f"PF-SS-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "A1", "frequency": "Monthly", "vests_on": "2024-01-01"})
    client.post("/api/pensions/schedules", json={"file_no": file_no, "version": "B2", "frequency": "Quarterly", "vests_on": "2024-02-01"})
    # Both tokens must be present across allowed columns
    r = client.get("/api/pensions/schedules", params={"file_no": file_no, "search": "B2 Month"})
    assert r.status_code == 200
    items = r.json()
    # No schedule has both 'B2' and 'Month'
    assert items == []
    # Single token search
    r = client.get("/api/pensions/schedules", params={"file_no": file_no, "search": "Monthly"})
    assert r.status_code == 200
    items = r.json()
    assert len(items) == 1 and items[0]["frequency"] == "Monthly"
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)
def test_marriages_tokenized_search_spouse_and_notes():
    """Marriage search requires every token to match across spouse_name/notes fields."""
    _auth()
    client = TestClient(app)
    file_no = f"PF-MS-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "01", "spouse_name": "Jane Doe", "notes": "Alpha beta"})
    client.post("/api/pensions/marriages", json={"file_no": file_no, "version": "02", "spouse_name": "John Smith", "notes": "Gamma delta"})
    # Both tokens required across fields
    r = client.get("/api/pensions/marriages", params={"file_no": file_no, "search": "Jane delta"})
    assert r.status_code == 200
    items = r.json()
    assert items == []
    # Single token
    r = client.get("/api/pensions/marriages", params={"file_no": file_no, "search": "Gamma"})
    assert r.status_code == 200
    items = r.json()
    assert len(items) == 1 and items[0]["spouse_name"] == "John Smith"
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)
def test_death_tokenized_search_beneficiary_and_type():
    """Death-benefit search requires every token to match across beneficiary/type/notes."""
    _auth()
    client = TestClient(app)
    file_no = f"PF-DS-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "01", "beneficiary_name": "Alice", "benefit_type": "Lump Sum", "notes": "Alpha"})
    client.post("/api/pensions/death-benefits", json={"file_no": file_no, "version": "02", "beneficiary_name": "Bob", "benefit_type": "Annuity", "notes": "Beta"})
    # Both tokens required
    r = client.get("/api/pensions/death-benefits", params={"file_no": file_no, "search": "Alice Annuity"})
    assert r.status_code == 200
    items = r.json()
    assert items == []
    # Single token
    r = client.get("/api/pensions/death-benefits", params={"file_no": file_no, "search": "Annuity"})
    assert r.status_code == 200
    items = r.json()
    assert len(items) == 1 and items[0]["benefit_type"] == "Annuity"
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)
def test_separations_tokenized_search_terms_and_notes():
    """Separation search requires every token to match across terms/notes fields."""
    _auth()
    client = TestClient(app)
    file_no = f"PF-SSep-{uuid.uuid4().hex[:8]}"
    _seed_file(client, file_no)
    client.post("/api/pensions/separations", json={"file_no": file_no, "version": "01", "agreement_date": "2024-01-01", "terms": "Alpha Clause", "notes": "First"})
    client.post("/api/pensions/separations", json={"file_no": file_no, "version": "02", "agreement_date": "2024-02-01", "terms": "Beta Clause", "notes": "Second"})
    # Both tokens required
    r = client.get("/api/pensions/separations", params={"file_no": file_no, "search": "Alpha Second"})
    assert r.status_code == 200
    items = r.json()
    assert items == []
    # Single token matches both rows ("Clause" appears in each terms value)
    r = client.get("/api/pensions/separations", params={"file_no": file_no, "search": "Clause"})
    assert r.status_code == 200
    items = r.json()
    assert len(items) == 2
    # Remove the auth override so later tests are unaffected.
    app.dependency_overrides.pop(get_current_user, None)

378
tests/test_templates_api.py Normal file
View File

@@ -0,0 +1,378 @@
import os
import io
from fastapi.testclient import TestClient
import pytest
os.environ.setdefault("SECRET_KEY", "x" * 32)
os.environ.setdefault("DATABASE_URL", "sqlite:////tmp/delphi_test.sqlite")
from app.main import app # noqa: E402
from app.auth.security import get_current_user # noqa: E402
class _User:
def __init__(self):
self.id = 1
self.username = "tester"
self.is_admin = True
self.is_active = True
@pytest.fixture()
def client():
    """Yield a TestClient with auth overridden; the override is removed on teardown."""
    app.dependency_overrides[get_current_user] = lambda: _User()
    try:
        yield TestClient(app)
    finally:
        # Always undo the override so other test modules see real auth.
        app.dependency_overrides.pop(get_current_user, None)
def _dummy_docx_bytes():
# Minimal docx that docxtpl can open. To avoid binary template creation,
# we use a pre-generated minimal DOCX header stored as bytes.
# Fallback: create in-memory empty docx using python-docx if available.
try:
from docx import Document
except Exception:
return b"PK\x03\x04" # still accepted and stored; preview will not render
d = Document()
p = d.add_paragraph()
p.add_run("Hello ")
p.add_run("{{CLIENT_NAME}}")
buf = io.BytesIO()
d.save(buf)
return buf.getvalue()
def test_upload_search_get_versions_and_preview(client: TestClient):
    """End-to-end: upload a DOCX, find it via search, fetch it, list versions, preview."""
    # Upload a DOCX template
    payload = {
        "name": "Engagement Letter",
        "category": "GENERAL",
        "description": "Test template",
        "semantic_version": "1.0.0",
    }
    files = {
        "file": ("letter.docx", _dummy_docx_bytes(), "application/vnd.openxmlformats-officedocument.wordprocessingml.document"),
    }
    resp = client.post("/api/templates/upload", data=payload, files=files)
    assert resp.status_code == 200, resp.text
    tpl = resp.json()
    tpl_id = tpl["id"]
    # Search by name
    resp = client.get("/api/templates/search?q=Engagement")
    assert resp.status_code == 200
    assert any(item["id"] == tpl_id for item in resp.json())
    # Also match by description via q
    resp = client.get("/api/templates/search?q=template")
    assert resp.status_code == 200
    ids = {item["id"] for item in resp.json()}
    assert tpl_id in ids
    # Get template
    resp = client.get(f"/api/templates/{tpl_id}")
    assert resp.status_code == 200
    # List versions
    resp = client.get(f"/api/templates/{tpl_id}/versions")
    assert resp.status_code == 200
    versions = resp.json()
    assert len(versions) >= 1
    vid = versions[0]["id"]
    # Preview with context that resolves CLIENT_NAME
    resp = client.post(
        f"/api/templates/{tpl_id}/preview",
        json={"context": {"CLIENT_NAME": "Alice"}, "version_id": vid},
    )
    assert resp.status_code == 200, resp.text
    body = resp.json()
    assert "resolved" in body and body["resolved"].get("CLIENT_NAME") == "Alice"
    assert isinstance(body.get("unresolved", []), list)
    assert body.get("output_size", 0) >= 0
def _docx_with_tokens(text: str) -> bytes:
try:
from docx import Document
except Exception:
return b"PK\x03\x04"
d = Document()
d.add_paragraph(text)
buf = io.BytesIO()
d.save(buf)
return buf.getvalue()
def test_add_version_and_form_variable_resolution(client: TestClient):
    """A new version can be added, and preview falls back to FormVariable values."""
    # Upload initial template
    files = {"file": ("vars.docx", _docx_with_tokens("{{OFFICE_NAME}}"), "application/vnd.openxmlformats-officedocument.wordprocessingml.document")}
    resp = client.post(
        "/api/templates/upload",
        data={"name": "VarsTpl", "semantic_version": "1.0.0"},
        files=files,
    )
    assert resp.status_code == 200, resp.text
    tpl_id = resp.json()["id"]
    # Add a new version
    files2 = {"file": ("vars2.docx", _docx_with_tokens("{{OFFICE_NAME}} {{NEW_FIELD}}"), "application/vnd.openxmlformats-officedocument.wordprocessingml.document")}
    resp = client.post(
        f"/api/templates/{tpl_id}/versions",
        data={"semantic_version": "1.1.0", "approve": True},
        files=files2,
    )
    assert resp.status_code == 200, resp.text
    # Insert a FormVariable directly (merge is idempotent across test runs)
    from app.database.base import SessionLocal
    from app.models.additional import FormVariable
    db = SessionLocal()
    try:
        db.merge(FormVariable(identifier="OFFICE_NAME", query="static", response="Delphi", active=1))
        db.commit()
    finally:
        db.close()
    # Preview without explicit context should resolve OFFICE_NAME from FormVariable
    resp = client.post(f"/api/templates/{tpl_id}/preview", json={"context": {}})
    assert resp.status_code == 200, resp.text
    body = resp.json()
    assert body["resolved"].get("OFFICE_NAME") == "Delphi"
def _upload_template(client: TestClient, name: str, category: str = "GENERAL") -> int:
    """Upload a minimal DOCX template and return its newly assigned id."""
    docx_mime = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
    resp = client.post(
        "/api/templates/upload",
        data={"name": name, "category": category, "semantic_version": "1.0.0"},
        files={"file": ("t.docx", _dummy_docx_bytes(), docx_mime)},
    )
    assert resp.status_code == 200, resp.text
    return resp.json()["id"]
def _add_keywords(client: TestClient, template_id: int, keywords):
    """Attach *keywords* to a template, asserting the call succeeds."""
    body = {"keywords": list(keywords)}
    resp = client.post(f"/api/templates/{template_id}/keywords", json=body)
    assert resp.status_code == 200, resp.text
def test_templates_search_keywords_mode_any_and_all(client: TestClient):
    """keywords_mode=any matches any listed keyword; keywords_mode=all requires them all."""
    # Create three templates with overlapping keywords
    t1 = _upload_template(client, "Template A")
    t2 = _upload_template(client, "Template B")
    t3 = _upload_template(client, "Template C")
    _add_keywords(client, t1, ["divorce", "qdro"])  # both
    _add_keywords(client, t2, ["divorce"])  # only divorce
    _add_keywords(client, t3, ["qdro", "pension"])  # qdro + other
    # ANY (default)
    resp = client.get(
        "/api/templates/search",
        params=[("keywords", "divorce"), ("keywords", "qdro")],
    )
    assert resp.status_code == 200, resp.text
    ids = {item["id"] for item in resp.json()}
    assert {t1, t2, t3}.issubset(ids)  # all three should appear
    # ANY (explicit)
    resp = client.get(
        "/api/templates/search",
        params=[("keywords", "divorce"), ("keywords", "qdro"), ("keywords_mode", "any")],
    )
    assert resp.status_code == 200
    ids = {item["id"] for item in resp.json()}
    assert {t1, t2, t3}.issubset(ids)
    # ALL - must contain both divorce AND qdro
    resp = client.get(
        "/api/templates/search",
        params=[("keywords", "divorce"), ("keywords", "qdro"), ("keywords_mode", "all")],
    )
    assert resp.status_code == 200, resp.text
    ids = {item["id"] for item in resp.json()}
    assert ids == {t1}
def test_templates_search_pagination_and_sorting(client: TestClient):
    """Search supports sort_by name/category/updated with sort_dir, skip, and limit."""
    # Ensure clean state for this test
    # Upload multiple templates with different names and categories
    ids = []
    ids.append(_upload_template(client, "Alpha", category="CAT2"))
    ids.append(_upload_template(client, "Charlie", category="CAT1"))
    ids.append(_upload_template(client, "Bravo", category="CAT1"))
    ids.append(_upload_template(client, "Echo", category="CAT3"))
    ids.append(_upload_template(client, "Delta", category="CAT2"))
    # Sort by name asc, limit 2
    resp = client.get(
        "/api/templates/search",
        params={"sort_by": "name", "sort_dir": "asc", "limit": 2},
    )
    assert resp.status_code == 200, resp.text
    names = [item["name"] for item in resp.json()]
    assert names == sorted(names)  # asc
    assert len(names) == 2
    # Sort by name desc, skip 1, limit 3
    resp = client.get(
        "/api/templates/search",
        params={"sort_by": "name", "sort_dir": "desc", "skip": 1, "limit": 3},
    )
    assert resp.status_code == 200
    names_desc = [item["name"] for item in resp.json()]
    assert len(names_desc) == 3
    assert names_desc == sorted(names_desc, reverse=True)
    # Sort by category asc (ties unresolved by name asc implicitly by DB)
    resp = client.get(
        "/api/templates/search",
        params={"sort_by": "category", "sort_dir": "asc"},
    )
    assert resp.status_code == 200
    categories = [item["category"] for item in resp.json()]
    assert categories == sorted(categories)
    # Sort by updated desc
    resp = client.get(
        "/api/templates/search",
        params={"sort_by": "updated", "sort_dir": "desc"},
    )
    assert resp.status_code == 200
    # We can't assert exact order of timestamps easily; just ensure we got results
    assert isinstance(resp.json(), list) and len(resp.json()) >= 5
def test_templates_search_active_filtering(client: TestClient):
    """active_only (default true) hides inactive templates; false returns them all."""
    # Create two templates, mark one inactive directly
    tid_active = _upload_template(client, "Active T")
    tid_inactive = _upload_template(client, "Inactive T")
    # Mark second as inactive via direct DB update
    from app.database.base import SessionLocal
    from app.models.templates import DocumentTemplate
    db = SessionLocal()
    try:
        tpl = db.query(DocumentTemplate).filter(DocumentTemplate.id == tid_inactive).first()
        tpl.active = False
        db.commit()
    finally:
        db.close()
    # Default active_only=true should return only the active one
    resp = client.get("/api/templates/search")
    assert resp.status_code == 200
    ids = {item["id"] for item in resp.json()}
    assert tid_active in ids
    assert tid_inactive not in ids
    # active_only=false should return both
    resp = client.get("/api/templates/search", params={"active_only": False})
    assert resp.status_code == 200
    ids2 = {item["id"] for item in resp.json()}
    assert tid_active in ids2 and tid_inactive in ids2
def test_templates_search_category_multi_repeat_and_csv(client: TestClient):
    """Category filter accepts repeated params and CSV values; unknown categories yield []."""
    # Upload templates across multiple categories
    t_cat1_a = _upload_template(client, "C1-A", category="CAT1")
    t_cat1_b = _upload_template(client, "C1-B", category="CAT1")
    t_cat2 = _upload_template(client, "C2-A", category="CAT2")
    t_cat3 = _upload_template(client, "C3-A", category="CAT3")
    # Repeatable category parameters (?category=CAT1&category=CAT3)
    resp = client.get(
        "/api/templates/search",
        params=[("category", "CAT1"), ("category", "CAT3")],
    )
    assert resp.status_code == 200, resp.text
    ids = {item["id"] for item in resp.json()}
    # Expect only CAT1 and CAT3 templates
    assert t_cat1_a in ids and t_cat1_b in ids and t_cat3 in ids
    assert t_cat2 not in ids
    # CSV within a single category parameter (?category=CAT2,CAT3)
    resp = client.get(
        "/api/templates/search",
        params={"category": "CAT2,CAT3"},
    )
    assert resp.status_code == 200, resp.text
    ids_csv = {item["id"] for item in resp.json()}
    assert t_cat2 in ids_csv and t_cat3 in ids_csv
    assert t_cat1_a not in ids_csv and t_cat1_b not in ids_csv
    # Unknown category should return empty set when exclusive
    resp = client.get(
        "/api/templates/search",
        params={"category": "NON_EXISTENT"},
    )
    assert resp.status_code == 200
    assert resp.json() == []
def test_templates_search_has_keywords_filter(client: TestClient):
    """has_keywords=true keeps only templates with keywords; false keeps only those without."""
    # Create two templates
    t1 = _upload_template(client, "HasKW")
    t2 = _upload_template(client, "NoKW")
    # Add keywords only to t1
    _add_keywords(client, t1, ["alpha", "beta"])
    # has_keywords=true should include only t1
    resp = client.get("/api/templates/search", params={"has_keywords": True})
    assert resp.status_code == 200, resp.text
    ids_true = {item["id"] for item in resp.json()}
    assert t1 in ids_true and t2 not in ids_true
    # has_keywords=false should include only t2
    resp = client.get("/api/templates/search", params={"has_keywords": False})
    assert resp.status_code == 200
    ids_false = {item["id"] for item in resp.json()}
    assert t2 in ids_false and t1 not in ids_false
def test_templates_categories_listing(client: TestClient):
    """/api/templates/categories returns per-category counts honoring active_only."""
    # Empty DB categories
    resp = client.get("/api/templates/categories")
    assert resp.status_code == 200
    empty = resp.json()
    # May contain defaults from previous tests; ensure it's a list
    assert isinstance(empty, list)
    # Create active/inactive across categories
    t1 = _upload_template(client, "K-A1", category="K1")
    t2 = _upload_template(client, "K-A2", category="K1")
    t3 = _upload_template(client, "K-B1", category="K2")
    # Inactivate one of K1
    from app.database.base import SessionLocal
    from app.models.templates import DocumentTemplate
    db = SessionLocal()
    try:
        tpl = db.query(DocumentTemplate).filter(DocumentTemplate.id == t2).first()
        tpl.active = False
        db.commit()
    finally:
        db.close()
    # active_only=true (default) should count only active: K1:1, K2:1
    resp = client.get("/api/templates/categories")
    assert resp.status_code == 200
    rows = resp.json()
    by_cat = {r["category"]: r["count"] for r in rows}
    assert by_cat.get("K1", 0) >= 1
    assert by_cat.get("K2", 0) >= 1
    # active_only=false should count both entries in K1
    resp = client.get("/api/templates/categories", params={"active_only": False})
    assert resp.status_code == 200
    rows_all = resp.json()
    by_cat_all = {r["category"]: r["count"] for r in rows_all}
    assert by_cat_all.get("K1", 0) >= 2
    assert by_cat_all.get("K2", 0) >= 1