1715 lines
64 KiB
Python
1715 lines
64 KiB
Python
"""
|
|
Advanced Search API endpoints - Comprehensive search across all data types
|
|
"""
|
|
from typing import List, Optional, Union, Dict, Any, Tuple
|
|
from fastapi import APIRouter, Depends, HTTPException, status, Query, Body
|
|
from sqlalchemy.orm import Session, joinedload, Load
|
|
from sqlalchemy import or_, and_, func, desc, asc, text, literal
|
|
from datetime import date, datetime, timedelta
|
|
from pydantic import BaseModel, Field, field_validator, model_validator
|
|
import re
|
|
|
|
from app.database.base import get_db
|
|
from app.api.search_highlight import (
|
|
build_query_tokens,
|
|
highlight_text,
|
|
create_customer_highlight,
|
|
create_file_highlight,
|
|
create_ledger_highlight,
|
|
create_qdro_highlight,
|
|
)
|
|
from app.models.rolodex import Rolodex, Phone
|
|
from app.models.files import File
|
|
from app.models.ledger import Ledger
|
|
from app.models.qdro import QDRO
|
|
from app.models.lookups import FormIndex, Employee, FileType, FileStatus, TransactionType, TransactionCode, State
|
|
from app.models.user import User
|
|
from app.auth.security import get_current_user
|
|
from app.services.cache import cache_get_json, cache_set_json
|
|
|
|
router = APIRouter()
|
|
@router.get("/_debug")
|
|
async def search_debug(
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Report whether FTS tables and Redis cache are active."""
|
|
# Detect FTS by probing sqlite_master
|
|
fts_status = {
|
|
"rolodex": False,
|
|
"files": False,
|
|
"ledger": False,
|
|
"qdros": False,
|
|
}
|
|
try:
|
|
rows = db.execute(text("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE '%_fts'"))
|
|
names = {r[0] for r in rows}
|
|
fts_status["rolodex"] = "rolodex_fts" in names
|
|
fts_status["files"] = "files_fts" in names
|
|
fts_status["ledger"] = "ledger_fts" in names
|
|
fts_status["qdros"] = "qdros_fts" in names
|
|
except Exception:
|
|
pass
|
|
|
|
# Detect Redis by trying to obtain a client
|
|
try:
|
|
from app.services.cache import _get_client # type: ignore
|
|
client = await _get_client()
|
|
redis_ok = client is not None
|
|
except Exception:
|
|
redis_ok = False
|
|
return {
|
|
"fts": fts_status,
|
|
"redis": redis_ok,
|
|
}
|
|
|
|
|
|
# Enhanced Search Schemas

# Canonical sets of accepted request values, used by the pydantic validators.
ALLOWED_SEARCH_TYPES = {"customer", "file", "ledger", "qdro", "document", "template"}
ALLOWED_DATE_FIELDS = {"created", "updated", "opened", "closed"}
ALLOWED_AMOUNT_FIELDS = {"amount", "balance", "total_charges"}

# Per-type field support for cross-field validation.
# Every entity supports the created/updated timestamps; files add open/close dates.
_TIMESTAMP_FIELDS = {"created", "updated"}
SUPPORTED_DATE_FIELDS_BY_TYPE: Dict[str, set[str]] = {
    "customer": set(_TIMESTAMP_FIELDS),
    "file": _TIMESTAMP_FIELDS | {"opened", "closed"},
    "ledger": set(_TIMESTAMP_FIELDS),
    "qdro": set(_TIMESTAMP_FIELDS),
    "document": set(_TIMESTAMP_FIELDS),
    "template": set(_TIMESTAMP_FIELDS),
}

# Only files and ledger entries carry money fields.
SUPPORTED_AMOUNT_FIELDS_BY_TYPE: Dict[str, set[str]] = {
    "customer": set(),
    "file": {"balance", "total_charges"},
    "ledger": {"amount"},
    "qdro": set(),
    "document": set(),
    "template": set(),
}

ALLOWED_SORT_BY = {"relevance", "date", "amount", "title"}
ALLOWED_SORT_ORDER = {"asc", "desc"}
|
|
|
|
class SearchResult(BaseModel):
    """Enhanced search result with metadata"""
    type: str  # "customer", "file", "ledger", "qdro", "document", "template", "phone"
    id: Union[str, int]  # entity primary key (file_no is a string; other ids may be ints)
    title: str  # short display label
    description: str  # one-line summary shown under the title
    url: str  # frontend route for navigating to the entity
    metadata: Optional[Dict[str, Any]] = None  # type-specific extras (location, phones, amounts, ...)
    relevance_score: Optional[float] = None  # heuristic score; FTS paths emit a constant 1.0
    highlight: Optional[str] = None  # snippet with query terms emphasized
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
|
|
|
|
class AdvancedSearchCriteria(BaseModel):
    """Advanced search criteria shared by /advanced, /global and the legacy endpoints.

    Field validators normalize/validate individual values; the model validator
    enforces cross-field consistency (ranges, per-type field support).
    """
    query: Optional[str] = None  # free-text query; None means filter-only search
    # Pydantic copies mutable defaults per instance, so this shared list is safe.
    search_types: List[str] = ["customer", "file", "ledger", "qdro", "document", "template"]

    # Text search options
    exact_phrase: bool = False  # match the query as one phrase (mutually exclusive with whole_words)
    case_sensitive: bool = False
    whole_words: bool = False  # token-boundary matching instead of prefix/substring

    # Date filters
    date_field: Optional[str] = None  # "created", "updated", "opened", "closed"
    date_from: Optional[date] = None  # inclusive lower bound
    date_to: Optional[date] = None  # inclusive upper bound

    # Amount filters
    amount_field: Optional[str] = None  # "amount", "balance", "total_charges"
    amount_min: Optional[float] = None
    amount_max: Optional[float] = None

    # Category filters
    file_types: Optional[List[str]] = None
    file_statuses: Optional[List[str]] = None
    employees: Optional[List[str]] = None
    transaction_types: Optional[List[str]] = None
    states: Optional[List[str]] = None

    # Boolean filters
    active_only: bool = True
    has_balance: Optional[bool] = None  # None = don't filter on balance
    is_billed: Optional[bool] = None

    # Result options
    sort_by: str = "relevance"  # relevance, date, amount, title
    sort_order: str = "desc"  # asc, desc
    limit: int = Field(50, ge=1, le=200)
    offset: int = Field(0, ge=0)

    # Field-level validators
    @field_validator("search_types", mode="before")
    @classmethod
    def validate_search_types(cls, value):
        """Normalize to a de-duplicated, lower-cased list; reject unknown types."""
        # Coerce to list of unique, lower-cased items preserving order
        raw_list = value or []
        if not isinstance(raw_list, list):
            raw_list = [raw_list]
        seen = set()
        cleaned: List[str] = []
        for item in raw_list:
            token = str(item or "").strip().lower()
            if not token:
                continue
            if token not in ALLOWED_SEARCH_TYPES:
                allowed = ", ".join(sorted(ALLOWED_SEARCH_TYPES))
                raise ValueError(f"search_types contains unknown type '{item}'. Allowed: {allowed}")
            if token not in seen:
                cleaned.append(token)
                seen.add(token)
        return cleaned

    @field_validator("sort_by")
    @classmethod
    def validate_sort_by(cls, value: str) -> str:
        """Lower-case and check membership in ALLOWED_SORT_BY."""
        v = (value or "").strip().lower()
        if v not in ALLOWED_SORT_BY:
            allowed = ", ".join(sorted(ALLOWED_SORT_BY))
            raise ValueError(f"sort_by must be one of: {allowed}")
        return v

    @field_validator("sort_order")
    @classmethod
    def validate_sort_order(cls, value: str) -> str:
        """Lower-case and check membership in ALLOWED_SORT_ORDER."""
        v = (value or "").strip().lower()
        if v not in ALLOWED_SORT_ORDER:
            allowed = ", ".join(sorted(ALLOWED_SORT_ORDER))
            raise ValueError(f"sort_order must be one of: {allowed}")
        return v

    @field_validator("date_field")
    @classmethod
    def validate_date_field(cls, value: Optional[str]) -> Optional[str]:
        """Lower-case and check membership in ALLOWED_DATE_FIELDS; None passes through."""
        if value is None:
            return None
        v = str(value).strip().lower()
        if v not in ALLOWED_DATE_FIELDS:
            allowed = ", ".join(sorted(ALLOWED_DATE_FIELDS))
            raise ValueError(f"date_field must be one of: {allowed}")
        return v

    @field_validator("amount_field")
    @classmethod
    def validate_amount_field(cls, value: Optional[str]) -> Optional[str]:
        """Lower-case and check membership in ALLOWED_AMOUNT_FIELDS; None passes through."""
        if value is None:
            return None
        v = str(value).strip().lower()
        if v not in ALLOWED_AMOUNT_FIELDS:
            allowed = ", ".join(sorted(ALLOWED_AMOUNT_FIELDS))
            raise ValueError(f"amount_field must be one of: {allowed}")
        return v

    # Cross-field validations
    @model_validator(mode="after")
    def validate_cross_fields(self):
        """Enforce inter-field constraints after individual fields validate."""
        # Ensure search_types is not empty
        if not self.search_types:
            allowed = ", ".join(sorted(ALLOWED_SEARCH_TYPES))
            raise ValueError(f"search_types cannot be empty. Allowed values: {allowed}")

        # exact_phrase and whole_words are mutually exclusive
        if self.exact_phrase and self.whole_words:
            raise ValueError("exact_phrase and whole_words cannot both be true. Choose one.")

        # Date range bounds
        if self.date_from and self.date_to and self.date_from > self.date_to:
            raise ValueError("date_from must be less than or equal to date_to")

        # Amount range bounds
        if self.amount_min is not None and self.amount_max is not None and self.amount_min > self.amount_max:
            raise ValueError("amount_min must be less than or equal to amount_max")

        # Ensure date_field is supported by at least one selected search_type
        if self.date_field:
            selected = set(self.search_types or [])
            # Validate allowed first (handled by field validator) then cross-type support
            if not any(self.date_field in SUPPORTED_DATE_FIELDS_BY_TYPE.get(t, set()) for t in selected):
                # Build helpful message
                examples = {
                    "opened": "file",
                    "closed": "file",
                }
                hint = " Include 'file' in search_types." if examples.get(self.date_field) == "file" else ""
                raise ValueError(f"date_field '{self.date_field}' is not supported by the selected search_types.{hint}")

        # Ensure amount_field is supported by at least one selected search_type
        if self.amount_field:
            selected = set(self.search_types or [])
            if not any(self.amount_field in SUPPORTED_AMOUNT_FIELDS_BY_TYPE.get(t, set()) for t in selected):
                # Provide actionable hint
                field_owner = {
                    "amount": "ledger",
                    "balance": "file",
                    "total_charges": "file",
                }.get(self.amount_field)
                hint = f" Include '{field_owner}' in search_types." if field_owner else ""
                raise ValueError(f"amount_field '{self.amount_field}' is not supported by the selected search_types.{hint}")

        return self
|
|
|
|
|
|
def _format_fts_query(raw_query: str, exact_phrase: bool, whole_words: bool) -> str:
|
|
"""Format a user query for SQLite FTS5 according to flags.
|
|
|
|
- exact_phrase: wrap the whole query in quotes for phrase match
|
|
- whole_words: leave tokens as-is (default FTS behavior)
|
|
- not whole_words: use prefix matching per token via '*'
|
|
We keep AND semantics across tokens to mirror SQL fallback behavior.
|
|
"""
|
|
if not raw_query:
|
|
return ""
|
|
if exact_phrase:
|
|
# Escape internal double quotes by doubling them per SQLite rules
|
|
escaped = str(raw_query).replace('"', '""')
|
|
return f'"{escaped}"'
|
|
tokens = build_query_tokens(raw_query)
|
|
if not tokens:
|
|
return raw_query
|
|
rendered = []
|
|
for t in tokens:
|
|
rendered.append(f"{t}" if whole_words else f"{t}*")
|
|
# AND semantics between tokens
|
|
return " AND ".join(rendered)
|
|
|
|
|
|
def _like_whole_word(column, term: str, case_sensitive: bool):
    """SQLite-friendly whole-word LIKE using padding with spaces.
    This approximates word boundaries by searching in ' ' || lower(column) || ' '.
    """
    # Normalize both sides to lower case unless a case-sensitive match was asked for.
    target = column if case_sensitive else func.lower(column)
    needle = term if case_sensitive else term.lower()
    padded = literal(' ') + target + literal(' ')
    return padded.like(f"% {needle} %")
|
|
|
|
|
|
def _like_phrase_word_boundaries(column, phrase: str, case_sensitive: bool):
    """LIKE match for an exact phrase bounded by spaces.
    This approximates word-boundary phrase matching similar to FTS5 token phrase.
    """
    # Same padding trick as _like_whole_word, applied to the whole phrase.
    target = column if case_sensitive else func.lower(column)
    needle = phrase if case_sensitive else phrase.lower()
    padded = literal(' ') + target + literal(' ')
    return padded.like(f"% {needle} %")
|
|
|
|
class SearchFilter(BaseModel):
    """Individual search filter"""
    field: str  # entity field name the filter applies to
    operator: str  # "equals", "contains", "starts_with", "ends_with", "greater_than", "less_than", "between", "in", "not_in"
    value: Union[str, int, float, List[Union[str, int, float]]]  # scalar, or a list for "between"/"in"/"not_in"
|
|
|
|
class SavedSearch(BaseModel):
    """Saved search definition"""
    id: Optional[int] = None  # None until persisted
    name: str  # user-facing label
    description: Optional[str] = None
    criteria: AdvancedSearchCriteria  # full criteria replayed when the search is re-run
    is_public: bool = False  # when True, visible to users other than the creator
    created_by: Optional[str] = None  # identifier of the creating user
    created_at: Optional[datetime] = None
    last_used: Optional[datetime] = None  # timestamp of the most recent execution
    use_count: int = 0  # number of times the search has been run
|
|
|
|
class SearchStats(BaseModel):
    """Search statistics"""
    # NOTE(review): these totals are produced by _calculate_search_stats, which is
    # defined elsewhere in this file — presumably corpus-wide counts, not match
    # counts; verify against that helper.
    total_customers: int
    total_files: int
    total_ledger_entries: int
    total_qdros: int
    total_documents: int
    total_templates: int
    total_phones: int
    search_execution_time: float  # seconds spent servicing the request
|
|
|
|
class AdvancedSearchResponse(BaseModel):
    """Advanced search response"""
    criteria: AdvancedSearchCriteria  # echo of the request criteria
    results: List[SearchResult]  # one page of results (offset/limit already applied)
    stats: SearchStats
    facets: Dict[str, Dict[str, int]]  # facet name -> value -> count, computed over all matches
    total_results: int  # match count before pagination
    page_info: Dict[str, Any]  # current_page / total_pages / has_next / has_previous
|
|
|
|
class GlobalSearchResponse(BaseModel):
    """Enhanced global search response"""
    query: str  # raw query string as received
    total_results: int  # sum of all per-bucket result counts
    execution_time: float  # seconds
    # Per-entity result buckets, each capped at the request limit.
    customers: List[SearchResult]
    files: List[SearchResult]
    ledgers: List[SearchResult]
    qdros: List[SearchResult]
    documents: List[SearchResult]
    templates: List[SearchResult]
    phones: List[SearchResult]
|
|
|
|
|
|
# Advanced Search Endpoints
|
|
|
|
@router.post("/advanced", response_model=AdvancedSearchResponse)
|
|
async def advanced_search(
|
|
criteria: AdvancedSearchCriteria = Body(...),
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Advanced search with complex criteria and filtering"""
|
|
start_time = datetime.now()
|
|
|
|
# Cache lookup keyed by user and entire criteria (including pagination)
|
|
try:
|
|
cached = await cache_get_json(
|
|
kind="advanced",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"criteria": criteria.model_dump(mode="json")},
|
|
)
|
|
except Exception:
|
|
cached = None
|
|
if cached:
|
|
return AdvancedSearchResponse(**cached)
|
|
|
|
all_results = []
|
|
facets = {}
|
|
|
|
# Search each entity type based on criteria
|
|
if "customer" in criteria.search_types:
|
|
customer_results = await _search_customers(criteria, db)
|
|
all_results.extend(customer_results)
|
|
|
|
if "file" in criteria.search_types:
|
|
file_results = await _search_files(criteria, db)
|
|
all_results.extend(file_results)
|
|
|
|
if "ledger" in criteria.search_types:
|
|
ledger_results = await _search_ledger(criteria, db)
|
|
all_results.extend(ledger_results)
|
|
|
|
if "qdro" in criteria.search_types:
|
|
qdro_results = await _search_qdros(criteria, db)
|
|
all_results.extend(qdro_results)
|
|
|
|
if "document" in criteria.search_types:
|
|
document_results = await _search_documents(criteria, db)
|
|
all_results.extend(document_results)
|
|
|
|
if "template" in criteria.search_types:
|
|
template_results = await _search_templates(criteria, db)
|
|
all_results.extend(template_results)
|
|
|
|
# Sort results
|
|
sorted_results = _sort_search_results(all_results, criteria.sort_by, criteria.sort_order)
|
|
|
|
# Apply pagination
|
|
total_count = len(sorted_results)
|
|
paginated_results = sorted_results[criteria.offset:criteria.offset + criteria.limit]
|
|
|
|
# Calculate facets
|
|
facets = _calculate_facets(sorted_results)
|
|
|
|
# Calculate stats
|
|
execution_time = (datetime.now() - start_time).total_seconds()
|
|
stats = await _calculate_search_stats(db, execution_time)
|
|
|
|
# Page info
|
|
page_info = {
|
|
"current_page": (criteria.offset // criteria.limit) + 1,
|
|
"total_pages": (total_count + criteria.limit - 1) // criteria.limit,
|
|
"has_next": criteria.offset + criteria.limit < total_count,
|
|
"has_previous": criteria.offset > 0,
|
|
}
|
|
|
|
# Build response object once
|
|
response = AdvancedSearchResponse(
|
|
criteria=criteria,
|
|
results=paginated_results,
|
|
stats=stats,
|
|
facets=facets,
|
|
total_results=total_count,
|
|
page_info=page_info,
|
|
)
|
|
|
|
# Store in cache (best-effort)
|
|
try:
|
|
await cache_set_json(
|
|
kind="advanced",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"criteria": criteria.model_dump(mode="json")},
|
|
value=response.model_dump(mode="json"),
|
|
ttl_seconds=90,
|
|
)
|
|
except Exception:
|
|
pass
|
|
|
|
return response
|
|
|
|
|
|
@router.get("/global", response_model=GlobalSearchResponse)
|
|
async def global_search(
|
|
q: str = Query(..., min_length=1),
|
|
limit: int = Query(10, ge=1, le=50),
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Enhanced global search across all entities"""
|
|
start_time = datetime.now()
|
|
# Cache lookup
|
|
cached = await cache_get_json(
|
|
kind="global",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"q": q, "limit": limit},
|
|
)
|
|
if cached:
|
|
return GlobalSearchResponse(**cached)
|
|
|
|
# Create criteria for global search
|
|
criteria = AdvancedSearchCriteria(
|
|
query=q,
|
|
search_types=["customer", "file", "ledger", "qdro", "document", "template"],
|
|
limit=limit
|
|
)
|
|
|
|
# Search each entity type
|
|
customer_results = await _search_customers(criteria, db)
|
|
file_results = await _search_files(criteria, db)
|
|
ledger_results = await _search_ledger(criteria, db)
|
|
qdro_results = await _search_qdros(criteria, db)
|
|
document_results = await _search_documents(criteria, db)
|
|
template_results = await _search_templates(criteria, db)
|
|
phone_results = await _search_phones(criteria, db)
|
|
|
|
total_results = (len(customer_results) + len(file_results) + len(ledger_results) +
|
|
len(qdro_results) + len(document_results) + len(template_results) + len(phone_results))
|
|
|
|
execution_time = (datetime.now() - start_time).total_seconds()
|
|
|
|
response = GlobalSearchResponse(
|
|
query=q,
|
|
total_results=total_results,
|
|
execution_time=execution_time,
|
|
customers=customer_results[:limit],
|
|
files=file_results[:limit],
|
|
ledgers=ledger_results[:limit],
|
|
qdros=qdro_results[:limit],
|
|
documents=document_results[:limit],
|
|
templates=template_results[:limit],
|
|
phones=phone_results[:limit]
|
|
)
|
|
try:
|
|
await cache_set_json(
|
|
kind="global",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"q": q, "limit": limit},
|
|
value=response.model_dump(mode="json"),
|
|
ttl_seconds=90,
|
|
)
|
|
except Exception:
|
|
pass
|
|
return response
|
|
|
|
|
|
@router.get("/suggestions")
|
|
async def search_suggestions(
|
|
q: str = Query(..., min_length=1),
|
|
limit: int = Query(10, ge=1, le=20),
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Get search suggestions and autocomplete"""
|
|
cached = await cache_get_json(
|
|
kind="suggestions",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"q": q, "limit": limit},
|
|
)
|
|
if cached:
|
|
return cached
|
|
suggestions = []
|
|
|
|
# Customer name suggestions
|
|
customers = db.query(Rolodex.first, Rolodex.last).filter(
|
|
or_(
|
|
Rolodex.first.ilike(f"{q}%"),
|
|
Rolodex.last.ilike(f"{q}%")
|
|
)
|
|
).limit(limit//2).all()
|
|
|
|
for customer in customers:
|
|
full_name = f"{customer.first or ''} {customer.last}".strip()
|
|
if full_name:
|
|
suggestions.append({
|
|
"text": full_name,
|
|
"type": "customer_name",
|
|
"category": "Customers"
|
|
})
|
|
|
|
# File number suggestions
|
|
files = db.query(File.file_no, File.regarding).filter(
|
|
File.file_no.ilike(f"{q}%")
|
|
).limit(limit//2).all()
|
|
|
|
for file_obj in files:
|
|
suggestions.append({
|
|
"text": file_obj.file_no,
|
|
"type": "file_number",
|
|
"category": "Files",
|
|
"description": file_obj.regarding
|
|
})
|
|
|
|
payload = {"suggestions": suggestions[:limit]}
|
|
try:
|
|
await cache_set_json(
|
|
kind="suggestions",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"q": q, "limit": limit},
|
|
value=payload,
|
|
ttl_seconds=60,
|
|
)
|
|
except Exception:
|
|
pass
|
|
return payload
|
|
|
|
|
|
@router.get("/last_criteria")
|
|
async def get_last_criteria(
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Return the last advanced search criteria for this user if present (best-effort)."""
|
|
try:
|
|
cached = await cache_get_json(
|
|
kind="last_criteria",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"v": 1},
|
|
)
|
|
return cached or {}
|
|
except Exception:
|
|
return {}
|
|
|
|
|
|
@router.post("/last_criteria")
|
|
async def set_last_criteria(
|
|
criteria: AdvancedSearchCriteria = Body(...),
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Persist the last advanced search criteria for this user (best-effort)."""
|
|
try:
|
|
await cache_set_json(
|
|
kind="last_criteria",
|
|
user_id=str(getattr(current_user, "id", "")),
|
|
parts={"v": 1},
|
|
value=criteria.model_dump(mode="json"),
|
|
ttl_seconds=60 * 60 * 24 * 7, # 7 days
|
|
)
|
|
except Exception:
|
|
pass
|
|
return {"ok": True}
|
|
|
|
|
|
@router.get("/facets")
|
|
async def get_search_facets(
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Get available search facets and filters"""
|
|
|
|
# File types
|
|
file_types = db.query(FileType.type_code, FileType.description).filter(
|
|
FileType.active == True
|
|
).all()
|
|
|
|
# File statuses
|
|
file_statuses = db.query(FileStatus.status_code, FileStatus.description).filter(
|
|
FileStatus.active == True
|
|
).all()
|
|
|
|
# Employees
|
|
employees = db.query(Employee.empl_num, Employee.first_name, Employee.last_name).filter(
|
|
Employee.active == True
|
|
).all()
|
|
|
|
# Transaction types
|
|
transaction_types = db.query(TransactionType.t_type, TransactionType.description).filter(
|
|
TransactionType.active == True
|
|
).all()
|
|
|
|
# States
|
|
states = db.query(State.abbreviation, State.name).filter(
|
|
State.active == True
|
|
).order_by(State.name).all()
|
|
|
|
return {
|
|
"file_types": [{"code": ft[0], "name": ft[1]} for ft in file_types],
|
|
"file_statuses": [{"code": fs[0], "name": fs[1]} for fs in file_statuses],
|
|
"employees": [{"code": emp[0], "name": f"{emp[1] or ''} {emp[2]}".strip()} for emp in employees],
|
|
"transaction_types": [{"code": tt[0], "name": tt[1]} for tt in transaction_types],
|
|
"states": [{"code": st[0], "name": st[1]} for st in states],
|
|
"date_fields": [
|
|
{"code": "created", "name": "Created Date"},
|
|
{"code": "updated", "name": "Updated Date"},
|
|
{"code": "opened", "name": "File Opened Date"},
|
|
{"code": "closed", "name": "File Closed Date"}
|
|
],
|
|
"amount_fields": [
|
|
{"code": "amount", "name": "Transaction Amount"},
|
|
{"code": "balance", "name": "Account Balance"},
|
|
{"code": "total_charges", "name": "Total Charges"}
|
|
],
|
|
"sort_options": [
|
|
{"code": "relevance", "name": "Relevance"},
|
|
{"code": "date", "name": "Date"},
|
|
{"code": "amount", "name": "Amount"},
|
|
{"code": "title", "name": "Title"}
|
|
]
|
|
}
|
|
|
|
|
|
# Legacy endpoints for backward compatibility
|
|
@router.get("/customers", response_model=List[SearchResult])
|
|
async def search_customers(
|
|
q: str = Query(..., min_length=2),
|
|
limit: int = Query(20, ge=1, le=100),
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Search customers (legacy endpoint)"""
|
|
criteria = AdvancedSearchCriteria(
|
|
query=q,
|
|
search_types=["customer"],
|
|
limit=limit
|
|
)
|
|
return await _search_customers(criteria, db)
|
|
|
|
|
|
@router.get("/files", response_model=List[SearchResult])
|
|
async def search_files(
|
|
q: str = Query(..., min_length=2),
|
|
limit: int = Query(20, ge=1, le=100),
|
|
db: Session = Depends(get_db),
|
|
current_user: User = Depends(get_current_user)
|
|
):
|
|
"""Search files (legacy endpoint)"""
|
|
criteria = AdvancedSearchCriteria(
|
|
query=q,
|
|
search_types=["file"],
|
|
limit=limit
|
|
)
|
|
return await _search_files(criteria, db)
|
|
|
|
|
|
# Search Implementation Functions
|
|
|
|
async def _search_customers(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
    """Search customers with advanced criteria. Uses FTS5 when available.

    Two code paths: a fast FTS5 path (any exception falls through to the
    legacy path, which also covers databases without the FTS tables) and a
    legacy SQLAlchemy LIKE/ILIKE path used when there is no query string or
    FTS fails. NOTE(review): the FTS path applies only state/date post-filters
    and ignores criteria.offset; verify that is intentional for callers that
    paginate.
    """
    results: List[SearchResult] = []

    # Attempt FTS5 path when there's a query string
    if criteria.query:
        # bm25(f) ranks best matches first (lower score = better).
        fts_sql = """
        SELECT r.*
        FROM rolodex_fts f
        JOIN rolodex r ON r.rowid = f.rowid
        WHERE f MATCH :q
        ORDER BY bm25(f) ASC
        LIMIT :limit
        """
        try:
            fts_q = _format_fts_query(criteria.query, criteria.exact_phrase, criteria.whole_words)
            rows = db.execute(
                text(fts_sql),
                {"q": fts_q, "limit": criteria.limit}
            ).mappings().all()

            # Optionally apply state/date filters post-FTS (small result set)
            filtered = []
            for row in rows:
                if criteria.states and row.get("abrev") not in set(criteria.states):
                    continue
                if criteria.date_from or criteria.date_to:
                    # Use created_at/updated_at when requested.
                    # NOTE(review): these comparisons assume the driver returns
                    # created_at/updated_at as date-comparable values; a string
                    # vs. date comparison would raise and silently drop to the
                    # legacy path via the except below — confirm the column types.
                    if criteria.date_field == "created" and criteria.date_from and row.get("created_at") and row["created_at"] < criteria.date_from:
                        continue
                    if criteria.date_field == "created" and criteria.date_to and row.get("created_at") and row["created_at"] > criteria.date_to:
                        continue
                    if criteria.date_field == "updated" and criteria.date_from and row.get("updated_at") and row["updated_at"] < criteria.date_from:
                        continue
                    if criteria.date_field == "updated" and criteria.date_to and row.get("updated_at") and row["updated_at"] > criteria.date_to:
                        continue
                filtered.append(row)

            for row in filtered[: criteria.limit]:
                full_name = f"{row.get('first') or ''} {row.get('last') or ''}".strip()
                location = f"{row.get('city') or ''}, {row.get('abrev') or ''}".strip(', ')
                # Build a lightweight object-like view for downstream helpers
                # (the highlight helper expects attribute access, not a mapping).
                class _C:
                    pass
                c = _C()
                for k, v in row.items():
                    setattr(c, k, v)
                # Phones require relationship; fetch lazily for these ids.
                # NOTE(review): one query per row (N+1) — bounded by criteria.limit (<= 200).
                phones = db.query(Phone.phone).filter(Phone.rolodex_id == row["id"]).all()
                phone_numbers = [p[0] for p in phones]

                results.append(SearchResult(
                    type="customer",
                    id=row["id"],
                    title=full_name or f"Customer {row['id']}",
                    description=f"ID: {row['id']} | {location}",
                    url=f"/customers?id={row['id']}",
                    metadata={
                        "location": location,
                        "email": row.get("email"),
                        "phones": phone_numbers,
                        "group": row.get("group"),
                        "state": row.get("abrev"),
                    },
                    relevance_score=1.0,  # bm25 used for sort; keep minimal score
                    highlight=_create_customer_highlight(c, criteria.query or ""),
                    created_at=row.get("created_at"),
                    updated_at=row.get("updated_at"),
                ))

            return results
        except Exception:
            # Fallback to legacy path when FTS isn't available
            pass

    # Legacy SQL path (no query or FTS not available)
    query = db.query(Rolodex).options(
        # Load only the columns this function reads, plus phone numbers eagerly.
        Load(Rolodex).load_only(
            Rolodex.id,
            Rolodex.first,
            Rolodex.last,
            Rolodex.city,
            Rolodex.abrev,
            Rolodex.email,
            Rolodex.memo,
            Rolodex.created_at,
            Rolodex.updated_at,
        ),
        joinedload(Rolodex.phone_numbers).load_only(Phone.phone),
    )

    if criteria.query:
        search_conditions = []
        if criteria.exact_phrase:
            # Phrase mode: substring match against the combined full name or memo.
            search_term = criteria.query
            search_conditions.append(
                or_(
                    func.concat(Rolodex.first, ' ', Rolodex.last).contains(search_term),
                    Rolodex.memo.contains(search_term),
                )
            )
        else:
            # Token mode: every whitespace token must match at least one column
            # (conditions are AND-ed together below).
            search_terms = criteria.query.split()
            for term in search_terms:
                if criteria.case_sensitive:
                    search_conditions.append(
                        or_(
                            Rolodex.id.contains(term),
                            Rolodex.last.contains(term),
                            Rolodex.first.contains(term),
                            Rolodex.city.contains(term),
                            Rolodex.email.contains(term),
                            Rolodex.memo.contains(term),
                        )
                    )
                else:
                    search_conditions.append(
                        or_(
                            Rolodex.id.ilike(f"%{term}%"),
                            Rolodex.last.ilike(f"%{term}%"),
                            Rolodex.first.ilike(f"%{term}%"),
                            Rolodex.city.ilike(f"%{term}%"),
                            Rolodex.email.ilike(f"%{term}%"),
                            Rolodex.memo.ilike(f"%{term}%"),
                        )
                    )
        if search_conditions:
            query = query.filter(and_(*search_conditions))

    if criteria.states:
        query = query.filter(Rolodex.abrev.in_(criteria.states))

    if criteria.date_from or criteria.date_to:
        # Only created/updated are meaningful for customers (see SUPPORTED_DATE_FIELDS_BY_TYPE).
        date_field_map = {"created": Rolodex.created_at, "updated": Rolodex.updated_at}
        if criteria.date_field in date_field_map:
            field = date_field_map[criteria.date_field]
            if criteria.date_from:
                query = query.filter(field >= criteria.date_from)
            if criteria.date_to:
                query = query.filter(field <= criteria.date_to)

    customers = query.limit(criteria.limit).all()

    for customer in customers:
        full_name = f"{customer.first or ''} {customer.last}".strip()
        location = f"{customer.city or ''}, {customer.abrev or ''}".strip(', ')
        relevance = _calculate_customer_relevance(customer, criteria.query or "")
        highlight = _create_customer_highlight(customer, criteria.query or "")
        phone_numbers = [p.phone for p in customer.phone_numbers] if customer.phone_numbers else []

        results.append(SearchResult(
            type="customer",
            id=customer.id,
            title=full_name or f"Customer {customer.id}",
            description=f"ID: {customer.id} | {location}",
            url=f"/customers?id={customer.id}",
            metadata={
                "location": location,
                "email": customer.email,
                "phones": phone_numbers,
                "group": customer.group,
                "state": customer.abrev,
            },
            relevance_score=relevance,
            highlight=highlight,
            created_at=customer.created_at,
            updated_at=customer.updated_at,
        ))

    return results
|
|
|
|
|
|
async def _search_files(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
|
|
"""Search files with advanced criteria. Uses FTS5 when available."""
|
|
results: List[SearchResult] = []
|
|
|
|
if criteria.query:
|
|
fts_sql = """
|
|
SELECT f.*
|
|
FROM files_fts x
|
|
JOIN files f ON f.rowid = x.rowid
|
|
WHERE x MATCH :q
|
|
ORDER BY bm25(x) ASC
|
|
LIMIT :limit
|
|
"""
|
|
try:
|
|
fts_q = _format_fts_query(criteria.query, criteria.exact_phrase, criteria.whole_words)
|
|
rows = db.execute(text(fts_sql), {"q": fts_q, "limit": criteria.limit}).mappings().all()
|
|
|
|
# Post-filtering on small set
|
|
filtered = []
|
|
for row in rows:
|
|
if criteria.file_types and row.get("file_type") not in set(criteria.file_types):
|
|
continue
|
|
if criteria.file_statuses and row.get("status") not in set(criteria.file_statuses):
|
|
continue
|
|
if criteria.employees and row.get("empl_num") not in set(criteria.employees):
|
|
continue
|
|
if criteria.has_balance is not None:
|
|
owing = float(row.get("amount_owing") or 0)
|
|
if criteria.has_balance and not (owing > 0):
|
|
continue
|
|
if not criteria.has_balance and not (owing <= 0):
|
|
continue
|
|
if criteria.amount_min is not None and float(row.get("amount_owing") or 0) < criteria.amount_min:
|
|
continue
|
|
if criteria.amount_max is not None and float(row.get("amount_owing") or 0) > criteria.amount_max:
|
|
continue
|
|
if criteria.date_from or criteria.date_to:
|
|
field = None
|
|
if criteria.date_field == "created":
|
|
field = row.get("created_at")
|
|
elif criteria.date_field == "updated":
|
|
field = row.get("updated_at")
|
|
elif criteria.date_field == "opened":
|
|
field = row.get("opened")
|
|
elif criteria.date_field == "closed":
|
|
field = row.get("closed")
|
|
if criteria.date_from and field and field < criteria.date_from:
|
|
continue
|
|
if criteria.date_to and field and field > criteria.date_to:
|
|
continue
|
|
filtered.append(row)
|
|
|
|
for row in filtered[: criteria.limit]:
|
|
# Load owner name for display
|
|
owner = db.query(Rolodex.first, Rolodex.last).filter(Rolodex.id == row.get("id")).first()
|
|
client_name = f"{(owner.first if owner else '') or ''} {(owner.last if owner else '') or ''}".strip()
|
|
class _F: pass
|
|
fobj = _F()
|
|
for k, v in row.items():
|
|
setattr(fobj, k, v)
|
|
|
|
results.append(SearchResult(
|
|
type="file",
|
|
id=row["file_no"],
|
|
title=f"File #{row['file_no']}",
|
|
description=f"Client: {client_name} | {row.get('regarding') or 'No description'} | Status: {row.get('status')}",
|
|
url=f"/files?file_no={row['file_no']}",
|
|
metadata={
|
|
"client_id": row.get("id"),
|
|
"client_name": client_name,
|
|
"file_type": row.get("file_type"),
|
|
"status": row.get("status"),
|
|
"employee": row.get("empl_num"),
|
|
"amount_owing": float(row.get("amount_owing") or 0),
|
|
"total_charges": float(row.get("total_charges") or 0),
|
|
},
|
|
relevance_score=1.0,
|
|
highlight=_create_file_highlight(fobj, criteria.query or ""),
|
|
created_at=row.get("created_at"),
|
|
updated_at=row.get("updated_at"),
|
|
))
|
|
|
|
return results
|
|
except Exception:
|
|
pass
|
|
|
|
# Fallback legacy path
|
|
query = db.query(File).options(
|
|
Load(File).load_only(
|
|
File.file_no,
|
|
File.id,
|
|
File.regarding,
|
|
File.status,
|
|
File.file_type,
|
|
File.empl_num,
|
|
File.amount_owing,
|
|
File.total_charges,
|
|
File.created_at,
|
|
File.updated_at,
|
|
),
|
|
joinedload(File.owner).load_only(Rolodex.first, Rolodex.last),
|
|
)
|
|
|
|
if criteria.query:
|
|
search_conditions = []
|
|
if criteria.exact_phrase:
|
|
phrase = criteria.query
|
|
search_conditions.append(or_(
|
|
_like_phrase_word_boundaries(File.regarding, phrase, criteria.case_sensitive),
|
|
_like_phrase_word_boundaries(File.file_type, phrase, criteria.case_sensitive),
|
|
_like_phrase_word_boundaries(File.memo, phrase, criteria.case_sensitive),
|
|
_like_phrase_word_boundaries(File.file_no, phrase, criteria.case_sensitive),
|
|
_like_phrase_word_boundaries(File.id, phrase, criteria.case_sensitive),
|
|
))
|
|
else:
|
|
tokens = build_query_tokens(criteria.query)
|
|
for term in tokens:
|
|
if criteria.whole_words:
|
|
search_conditions.append(or_(
|
|
_like_whole_word(File.regarding, term, criteria.case_sensitive),
|
|
_like_whole_word(File.file_type, term, criteria.case_sensitive),
|
|
_like_whole_word(File.memo, term, criteria.case_sensitive),
|
|
_like_whole_word(File.file_no, term, criteria.case_sensitive),
|
|
_like_whole_word(File.id, term, criteria.case_sensitive),
|
|
))
|
|
else:
|
|
if criteria.case_sensitive:
|
|
search_conditions.append(or_(
|
|
File.file_no.contains(term),
|
|
File.id.contains(term),
|
|
File.regarding.contains(term),
|
|
File.file_type.contains(term),
|
|
File.memo.contains(term),
|
|
))
|
|
else:
|
|
search_conditions.append(or_(
|
|
File.file_no.ilike(f"%{term}%"),
|
|
File.id.ilike(f"%{term}%"),
|
|
File.regarding.ilike(f"%{term}%"),
|
|
File.file_type.ilike(f"%{term}%"),
|
|
File.memo.ilike(f"%{term}%"),
|
|
))
|
|
if search_conditions:
|
|
query = query.filter(and_(*search_conditions))
|
|
|
|
if criteria.file_types:
|
|
query = query.filter(File.file_type.in_(criteria.file_types))
|
|
if criteria.file_statuses:
|
|
query = query.filter(File.status.in_(criteria.file_statuses))
|
|
if criteria.employees:
|
|
query = query.filter(File.empl_num.in_(criteria.employees))
|
|
if criteria.has_balance is not None:
|
|
query = query.filter(File.amount_owing > 0) if criteria.has_balance else query.filter(File.amount_owing <= 0)
|
|
if criteria.amount_min is not None:
|
|
query = query.filter(File.amount_owing >= criteria.amount_min)
|
|
if criteria.amount_max is not None:
|
|
query = query.filter(File.amount_owing <= criteria.amount_max)
|
|
if criteria.date_from or criteria.date_to:
|
|
date_field_map = {"created": File.created_at, "updated": File.updated_at, "opened": File.opened, "closed": File.closed}
|
|
if criteria.date_field in date_field_map:
|
|
field = date_field_map[criteria.date_field]
|
|
if criteria.date_from:
|
|
query = query.filter(field >= criteria.date_from)
|
|
if criteria.date_to:
|
|
query = query.filter(field <= criteria.date_to)
|
|
|
|
files = query.limit(criteria.limit).all()
|
|
|
|
for file_obj in files:
|
|
client_name = ""
|
|
if file_obj.owner:
|
|
client_name = f"{file_obj.owner.first or ''} {file_obj.owner.last}".strip()
|
|
relevance = _calculate_file_relevance(file_obj, criteria.query or "")
|
|
highlight = _create_file_highlight(file_obj, criteria.query or "")
|
|
results.append(SearchResult(
|
|
type="file",
|
|
id=file_obj.file_no,
|
|
title=f"File #{file_obj.file_no}",
|
|
description=f"Client: {client_name} | {file_obj.regarding or 'No description'} | Status: {file_obj.status}",
|
|
url=f"/files?file_no={file_obj.file_no}",
|
|
metadata={
|
|
"client_id": file_obj.id,
|
|
"client_name": client_name,
|
|
"file_type": file_obj.file_type,
|
|
"status": file_obj.status,
|
|
"employee": file_obj.empl_num,
|
|
"amount_owing": float(file_obj.amount_owing or 0),
|
|
"total_charges": float(file_obj.total_charges or 0),
|
|
},
|
|
relevance_score=relevance,
|
|
highlight=highlight,
|
|
created_at=file_obj.created_at,
|
|
updated_at=file_obj.updated_at,
|
|
))
|
|
|
|
return results
|
|
|
|
|
|
async def _search_ledger(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
    """Search ledger entries with advanced criteria. Uses FTS5 when available.

    Fast path: query the SQLite FTS5 index (``ledger_fts``) ranked by bm25,
    then post-filter the already-limited hit set in Python. If the FTS table
    is missing or the MATCH expression is invalid, fall back to the legacy
    SQLAlchemy LIKE/ILIKE path with all filters applied in SQL.

    Args:
        criteria: Parsed advanced-search criteria (query text, filters,
            date range, limit, ...).
        db: Active SQLAlchemy session.

    Returns:
        Up to ``criteria.limit`` SearchResult objects.
    """
    results: List[SearchResult] = []

    if criteria.query:
        fts_sql = """
            SELECT l.*
            FROM ledger_fts x
            JOIN ledger l ON l.rowid = x.rowid
            WHERE x MATCH :q
            ORDER BY bm25(x) ASC
            LIMIT :limit
        """
        try:
            fts_q = _format_fts_query(criteria.query, criteria.exact_phrase, criteria.whole_words)
            rows = db.execute(text(fts_sql), {"q": fts_q, "limit": criteria.limit}).mappings().all()

            # Post-filter the small FTS result set in Python.
            filtered = []
            for row in rows:
                if criteria.transaction_types and row.get("t_type") not in set(criteria.transaction_types):
                    continue
                if criteria.employees and row.get("empl_num") not in set(criteria.employees):
                    continue
                if criteria.is_billed is not None:
                    billed_flag = (row.get("billed") == "Y")
                    if criteria.is_billed != billed_flag:
                        continue
                if criteria.amount_min is not None and float(row.get("amount") or 0) < criteria.amount_min:
                    continue
                if criteria.amount_max is not None and float(row.get("amount") or 0) > criteria.amount_max:
                    continue
                if criteria.date_from and row.get("date") and row["date"] < criteria.date_from:
                    continue
                if criteria.date_to and row.get("date") and row["date"] > criteria.date_to:
                    continue
                filtered.append(row)

            # Fetch owner names for display
            for row in filtered[: criteria.limit]:
                client_name = ""
                # Join to files -> rolodex for name
                owner = db.query(Rolodex.first, Rolodex.last).join(File, File.id == Rolodex.id).filter(File.file_no == row.get("file_no")).first()
                if owner:
                    client_name = f"{owner.first or ''} {owner.last or ''}".strip()
                # Lightweight attribute wrapper so the shared highlighter
                # (which expects a model-like object) can read the row.
                class _L: pass
                lobj = _L()
                for k, v in row.items():
                    setattr(lobj, k, v)
                results.append(SearchResult(
                    type="ledger",
                    id=row["id"],
                    title=f"Transaction {row.get('t_code')} - ${row.get('amount')}",
                    description=f"File: {row.get('file_no')} | Client: {client_name} | Date: {row.get('date')} | {row.get('note') or 'No note'}",
                    url=f"/financial?file_no={row.get('file_no')}",
                    metadata={
                        "file_no": row.get("file_no"),
                        "transaction_type": row.get("t_type"),
                        "transaction_code": row.get("t_code"),
                        "amount": float(row.get("amount") or 0),
                        "quantity": float(row.get("quantity") or 0),
                        "rate": float(row.get("rate") or 0),
                        "employee": row.get("empl_num"),
                        "billed": row.get("billed") == "Y",
                        "date": row.get("date").isoformat() if row.get("date") else None,
                    },
                    relevance_score=1.0,  # rows already arrive bm25-ordered
                    highlight=_create_ledger_highlight(lobj, criteria.query or ""),
                    created_at=row.get("created_at"),
                    updated_at=row.get("updated_at"),
                ))

            return results
        except Exception:
            # Deliberate best-effort: FTS table missing or MATCH syntax
            # error — fall through to the legacy path below.
            pass

    # Fallback legacy path
    query = db.query(Ledger).options(
        # Only load the columns the result card actually needs.
        Load(Ledger).load_only(
            Ledger.id,
            Ledger.file_no,
            Ledger.t_code,
            Ledger.t_type,
            Ledger.empl_num,
            Ledger.quantity,
            Ledger.rate,
            Ledger.amount,
            Ledger.billed,
            Ledger.note,
            Ledger.date,
            Ledger.created_at,
            Ledger.updated_at,
        ),
        joinedload(Ledger.file)
        .load_only(File.file_no, File.id)
        .joinedload(File.owner)
        .load_only(Rolodex.first, Rolodex.last),
    )
    if criteria.query:
        search_conditions = []
        if criteria.exact_phrase:
            # Match the whole query as a phrase at word boundaries.
            phrase = criteria.query
            search_conditions.append(or_(
                _like_phrase_word_boundaries(Ledger.note, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(Ledger.t_code, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(Ledger.file_no, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(Ledger.empl_num, phrase, criteria.case_sensitive),
            ))
        else:
            # AND across tokens, OR across searchable columns per token.
            tokens = build_query_tokens(criteria.query)
            for term in tokens:
                if criteria.whole_words:
                    search_conditions.append(or_(
                        _like_whole_word(Ledger.note, term, criteria.case_sensitive),
                        _like_whole_word(Ledger.t_code, term, criteria.case_sensitive),
                        _like_whole_word(Ledger.file_no, term, criteria.case_sensitive),
                        _like_whole_word(Ledger.empl_num, term, criteria.case_sensitive),
                    ))
                else:
                    if criteria.case_sensitive:
                        search_conditions.append(or_(
                            Ledger.file_no.contains(term),
                            Ledger.t_code.contains(term),
                            Ledger.note.contains(term),
                            Ledger.empl_num.contains(term),
                        ))
                    else:
                        search_conditions.append(or_(
                            Ledger.file_no.ilike(f"%{term}%"),
                            Ledger.t_code.ilike(f"%{term}%"),
                            Ledger.note.ilike(f"%{term}%"),
                            Ledger.empl_num.ilike(f"%{term}%"),
                        ))
        if search_conditions:
            query = query.filter(and_(*search_conditions))
    if criteria.transaction_types:
        query = query.filter(Ledger.t_type.in_(criteria.transaction_types))
    if criteria.employees:
        query = query.filter(Ledger.empl_num.in_(criteria.employees))
    if criteria.is_billed is not None:
        query = query.filter(Ledger.billed == ("Y" if criteria.is_billed else "N"))
    if criteria.amount_min is not None:
        query = query.filter(Ledger.amount >= criteria.amount_min)
    if criteria.amount_max is not None:
        query = query.filter(Ledger.amount <= criteria.amount_max)
    if criteria.date_from:
        query = query.filter(Ledger.date >= criteria.date_from)
    if criteria.date_to:
        query = query.filter(Ledger.date <= criteria.date_to)
    ledgers = query.limit(criteria.limit).all()

    for ledger in ledgers:
        client_name = ""
        if ledger.file and ledger.file.owner:
            # Guard both name parts so a NULL last name doesn't render "None".
            client_name = f"{ledger.file.owner.first or ''} {ledger.file.owner.last or ''}".strip()
        relevance = _calculate_ledger_relevance(ledger, criteria.query or "")
        highlight = _create_ledger_highlight(ledger, criteria.query or "")
        results.append(SearchResult(
            type="ledger",
            id=ledger.id,
            title=f"Transaction {ledger.t_code} - ${ledger.amount}",
            description=f"File: {ledger.file_no} | Client: {client_name} | Date: {ledger.date} | {ledger.note or 'No note'}",
            url=f"/financial?file_no={ledger.file_no}",
            metadata={
                "file_no": ledger.file_no,
                "transaction_type": ledger.t_type,
                "transaction_code": ledger.t_code,
                # Bug fix: guard NULL amounts like the FTS path does, instead
                # of letting float(None) raise.
                "amount": float(ledger.amount or 0),
                "quantity": float(ledger.quantity or 0),
                "rate": float(ledger.rate or 0),
                "employee": ledger.empl_num,
                "billed": ledger.billed == "Y",
                "date": ledger.date.isoformat() if ledger.date else None,
            },
            relevance_score=relevance,
            highlight=highlight,
            created_at=ledger.created_at,
            updated_at=ledger.updated_at,
        ))

    return results
|
|
|
|
|
|
async def _search_qdros(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
    """Search QDRO documents with advanced criteria. Uses FTS5 when available."""
    results: List[SearchResult] = []

    if criteria.query:
        # Fast path: SQLite FTS5 index over qdros, ranked by bm25.
        fts_sql = """
            SELECT q.*
            FROM qdros_fts x
            JOIN qdros q ON q.rowid = x.rowid
            WHERE x MATCH :q
            ORDER BY bm25(x) ASC
            LIMIT :limit
        """
        try:
            fts_q = _format_fts_query(criteria.query, criteria.exact_phrase, criteria.whole_words)
            rows = db.execute(text(fts_sql), {"q": fts_q, "limit": criteria.limit}).mappings().all()
            for row in rows[: criteria.limit]:
                # Lightweight attribute wrapper so the shared highlighter
                # (which expects a model-like object) can read the row.
                class _Q: pass
                q = _Q()
                for k, v in row.items():
                    setattr(q, k, v)
                results.append(SearchResult(
                    type="qdro",
                    id=row["id"],
                    title=row.get("form_name") or f"QDRO v{row.get('version')}",
                    description=f"File: {row.get('file_no')} | Status: {row.get('status')} | Case: {row.get('case_number') or 'N/A'}",
                    url=f"/documents?qdro_id={row['id']}",
                    metadata={
                        "file_no": row.get("file_no"),
                        "version": row.get("version"),
                        "status": row.get("status"),
                        "petitioner": row.get("pet"),
                        "respondent": row.get("res"),
                        "case_number": row.get("case_number"),
                    },
                    relevance_score=1.0,  # rows already arrive bm25-ordered
                    highlight=_create_qdro_highlight(q, criteria.query or ""),
                    created_at=row.get("created_at"),
                    updated_at=row.get("updated_at"),
                ))
            return results
        except Exception:
            # Deliberate best-effort: FTS table missing or MATCH syntax
            # error — fall through to the legacy LIKE-based path below.
            pass

    # Fallback legacy path
    query = db.query(QDRO).options(joinedload(QDRO.file))

    if criteria.query:
        search_conditions = []
        if criteria.exact_phrase:
            # Match the whole query as a phrase at word boundaries.
            phrase = criteria.query
            search_conditions.append(or_(
                _like_phrase_word_boundaries(QDRO.form_name, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(QDRO.pet, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(QDRO.res, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(QDRO.case_number, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(QDRO.notes, phrase, criteria.case_sensitive),
                _like_phrase_word_boundaries(QDRO.file_no, phrase, criteria.case_sensitive),
            ))
        else:
            # AND across tokens, OR across searchable columns per token.
            tokens = build_query_tokens(criteria.query)
            for term in tokens:
                if criteria.whole_words:
                    search_conditions.append(or_(
                        _like_whole_word(QDRO.form_name, term, criteria.case_sensitive),
                        _like_whole_word(QDRO.pet, term, criteria.case_sensitive),
                        _like_whole_word(QDRO.res, term, criteria.case_sensitive),
                        _like_whole_word(QDRO.case_number, term, criteria.case_sensitive),
                        _like_whole_word(QDRO.notes, term, criteria.case_sensitive),
                        _like_whole_word(QDRO.file_no, term, criteria.case_sensitive),
                    ))
                else:
                    if criteria.case_sensitive:
                        search_conditions.append(or_(
                            QDRO.file_no.contains(term),
                            QDRO.form_name.contains(term),
                            QDRO.pet.contains(term),
                            QDRO.res.contains(term),
                            QDRO.case_number.contains(term),
                            QDRO.notes.contains(term),
                        ))
                    else:
                        search_conditions.append(or_(
                            QDRO.file_no.ilike(f"%{term}%"),
                            QDRO.form_name.ilike(f"%{term}%"),
                            QDRO.pet.ilike(f"%{term}%"),
                            QDRO.res.ilike(f"%{term}%"),
                            QDRO.case_number.ilike(f"%{term}%"),
                            QDRO.notes.ilike(f"%{term}%"),
                        ))
        if search_conditions:
            query = query.filter(and_(*search_conditions))

    qdros = query.limit(criteria.limit).all()

    for qdro in qdros:
        relevance = _calculate_qdro_relevance(qdro, criteria.query or "")
        highlight = _create_qdro_highlight(qdro, criteria.query or "")
        results.append(SearchResult(
            type="qdro",
            id=qdro.id,
            title=qdro.form_name or f"QDRO v{qdro.version}",
            description=f"File: {qdro.file_no} | Status: {qdro.status} | Case: {qdro.case_number or 'N/A'}",
            url=f"/documents?qdro_id={qdro.id}",
            metadata={
                "file_no": qdro.file_no,
                "version": qdro.version,
                "status": qdro.status,
                "petitioner": qdro.pet,
                "respondent": qdro.res,
                "case_number": qdro.case_number,
            },
            relevance_score=relevance,
            highlight=highlight,
            created_at=qdro.created_at,
            updated_at=qdro.updated_at,
        ))

    return results
|
|
|
|
|
|
async def _search_documents(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
    """Search document templates and forms.

    Builds one OR-of-columns condition per whitespace-separated query word,
    ANDs them together, optionally restricts to active templates, and maps
    the matching FormIndex rows to SearchResult objects.
    """
    q = db.query(FormIndex)

    if criteria.query:
        conditions = []
        for word in criteria.query.split():
            if criteria.case_sensitive:
                matcher = or_(
                    FormIndex.form_id.contains(word),
                    FormIndex.form_name.contains(word),
                    FormIndex.category.contains(word),
                )
            else:
                pattern = f"%{word}%"
                matcher = or_(
                    FormIndex.form_id.ilike(pattern),
                    FormIndex.form_name.ilike(pattern),
                    FormIndex.category.ilike(pattern),
                )
            conditions.append(matcher)
        if conditions:
            q = q.filter(and_(*conditions))

    if criteria.active_only:
        q = q.filter(FormIndex.active == True)  # noqa: E712 — SQLAlchemy column comparison

    hits: List[SearchResult] = []
    for tpl in q.limit(criteria.limit).all():
        hits.append(SearchResult(
            type="document",
            id=tpl.form_id,
            title=tpl.form_name,
            description=f"Template ID: {tpl.form_id} | Category: {tpl.category}",
            url=f"/documents?template_id={tpl.form_id}",
            metadata={
                "form_id": tpl.form_id,
                "category": tpl.category,
                "active": tpl.active
            },
            relevance_score=_calculate_document_relevance(tpl, criteria.query or ""),
            created_at=tpl.created_at,
            updated_at=tpl.updated_at
        ))

    return hits
|
|
|
|
|
|
async def _search_templates(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
    """Search templates (alias for documents).

    Templates and documents share the FormIndex table, so this simply
    delegates to _search_documents.
    """
    return await _search_documents(criteria, db)
|
|
|
|
|
|
async def _search_phones(criteria: AdvancedSearchCriteria, db: Session) -> List[SearchResult]:
    """Search phone numbers.

    Matches the raw query against the stored phone string and location, and
    additionally matches a digits-only form of the query (so "555-1234"
    finds "5551234" and vice versa).

    Args:
        criteria: Parsed advanced-search criteria (query text, limit, ...).
        db: Active SQLAlchemy session.

    Returns:
        Up to ``criteria.limit`` SearchResult objects.
    """
    query = db.query(Phone).options(
        Load(Phone).load_only(
            Phone.id,
            Phone.phone,
            Phone.location,
            Phone.rolodex_id,
            Phone.created_at,
            Phone.updated_at,
        ),
        joinedload(Phone.rolodex_entry).load_only(Rolodex.first, Rolodex.last),
    )

    if criteria.query:
        # Clean phone number for search (remove non-digits)
        clean_query = re.sub(r'[^\d]', '', criteria.query)

        conditions = [
            Phone.phone.contains(criteria.query),
            Phone.location.ilike(f"%{criteria.query}%"),
        ]
        # Bug fix: only add the digits-only condition when the query actually
        # contains digits — contains("") compiles to LIKE '%%', which matches
        # every row and made any non-numeric query return all phones.
        if clean_query:
            conditions.append(Phone.phone.contains(clean_query))

        query = query.filter(or_(*conditions))

    phones = query.limit(criteria.limit).all()

    results = []
    for phone in phones:
        owner_name = ""
        if phone.rolodex_entry:
            # Guard both name parts so a NULL last name doesn't render "None".
            owner_name = f"{phone.rolodex_entry.first or ''} {phone.rolodex_entry.last or ''}".strip()

        results.append(SearchResult(
            type="phone",
            id=f"{phone.id}_{phone.phone}",
            title=phone.phone,
            description=f"Owner: {owner_name} | Location: {phone.location or 'Unknown'}",
            url=f"/customers?id={phone.rolodex_id}",
            metadata={
                "owner_id": phone.rolodex_id,
                "owner_name": owner_name,
                "location": phone.location
            },
            relevance_score=1.0,
            created_at=phone.created_at,
            updated_at=phone.updated_at
        ))

    return results
|
|
|
|
|
|
# Utility Functions
|
|
|
|
def _sort_search_results(results: List[SearchResult], sort_by: str, sort_order: str) -> List[SearchResult]:
    """Sort search results based on criteria.

    Supported sort keys: "relevance", "date", "amount", "title"; any other
    value returns the results unchanged. "desc" reverses the order.
    """
    descending = (sort_order == "desc")

    # Dispatch table: sort key name -> per-result key function.
    key_funcs = {
        "relevance": lambda r: r.relevance_score or 0,
        "date": lambda r: r.updated_at or datetime.min,
        "amount": lambda r: r.metadata.get("amount", 0) if r.metadata else 0,
        "title": lambda r: r.title,
    }

    key_func = key_funcs.get(sort_by)
    if key_func is None:
        # Unknown sort key: leave the input ordering intact.
        return results
    return sorted(results, key=key_func, reverse=descending)
|
|
|
|
|
|
def _calculate_facets(results: List[SearchResult]) -> Dict[str, Dict[str, int]]:
    """Calculate facets from search results.

    Returns a mapping of facet name -> {facet value: occurrence count},
    covering the result type plus selected metadata keys.
    """
    metadata_facets = ["file_type", "status", "employee", "category", "state", "transaction_type"]
    facets: Dict[str, Dict[str, int]] = {"type": {}}
    for name in metadata_facets:
        facets[name] = {}

    for hit in results:
        # Count by result type.
        bucket = facets["type"]
        bucket[hit.type] = bucket.get(hit.type, 0) + 1

        # Count truthy metadata values for each tracked facet key.
        if not hit.metadata:
            continue
        for name in metadata_facets:
            value = hit.metadata.get(name)
            if value:
                bucket = facets[name]
                bucket[value] = bucket.get(value, 0) + 1

    return facets
|
|
|
|
|
|
async def _calculate_search_stats(db: Session, execution_time: float) -> SearchStats:
    """Calculate search statistics.

    Issues one COUNT query per entity table and packages the totals with the
    measured search execution time.

    Args:
        db: Active SQLAlchemy session.
        execution_time: Elapsed wall-clock seconds for the search.
    """
    total_customers = db.query(Rolodex).count()
    total_files = db.query(File).count()
    total_ledger_entries = db.query(Ledger).count()
    total_qdros = db.query(QDRO).count()
    total_documents = db.query(FormIndex).count()
    # Templates live in the same FormIndex table as documents, so the two
    # totals are intentionally identical.
    total_templates = db.query(FormIndex).count()
    total_phones = db.query(Phone).count()

    return SearchStats(
        total_customers=total_customers,
        total_files=total_files,
        total_ledger_entries=total_ledger_entries,
        total_qdros=total_qdros,
        total_documents=total_documents,
        total_templates=total_templates,
        total_phones=total_phones,
        search_execution_time=execution_time
    )
|
|
|
|
|
|
# Relevance calculation functions
|
|
def _calculate_customer_relevance(customer: Rolodex, query: str) -> float:
|
|
"""Calculate relevance score for customer"""
|
|
if not query:
|
|
return 1.0
|
|
|
|
score = 0.0
|
|
query_lower = query.lower()
|
|
|
|
# Exact matches get higher scores
|
|
full_name = f"{customer.first or ''} {customer.last}".strip().lower()
|
|
if query_lower == full_name:
|
|
score += 10.0
|
|
elif query_lower in full_name:
|
|
score += 5.0
|
|
|
|
# ID matches
|
|
if query_lower == (customer.id or "").lower():
|
|
score += 8.0
|
|
elif query_lower in (customer.id or "").lower():
|
|
score += 3.0
|
|
|
|
# Email matches
|
|
if customer.email and query_lower in customer.email.lower():
|
|
score += 4.0
|
|
|
|
# City matches
|
|
if customer.city and query_lower in customer.city.lower():
|
|
score += 2.0
|
|
|
|
return max(score, 0.1) # Minimum score
|
|
|
|
|
|
def _calculate_file_relevance(file_obj: File, query: str) -> float:
    """Calculate relevance score for file.

    Scoring: exact file-number match +10 (substring +5), exact client-ID
    match +8, "regarding" substring +4, file-type substring +3. An empty
    query scores 1.0; any non-empty query scores at least 0.1.
    """
    if not query:
        return 1.0

    needle = query.lower()
    score = 0.0

    # File number: an exact match outranks a substring hit.
    file_no = (file_obj.file_no or "").lower()
    if needle == file_no:
        score += 10.0
    elif needle in file_no:
        score += 5.0

    # Client ID (exact only).
    if needle == (file_obj.id or "").lower():
        score += 8.0

    # Free-text "regarding" field.
    if file_obj.regarding and needle in file_obj.regarding.lower():
        score += 4.0

    # File type.
    if file_obj.file_type and needle in file_obj.file_type.lower():
        score += 3.0

    return max(score, 0.1)
|
|
|
|
|
|
def _calculate_ledger_relevance(ledger: Ledger, query: str) -> float:
    """Calculate relevance score for ledger entry.

    Scoring: exact file-number match +8 (substring +4), exact transaction
    code match +6, note substring +3. An empty query scores 1.0; any
    non-empty query scores at least 0.1.
    """
    if not query:
        return 1.0

    needle = query.lower()
    score = 0.0

    # File number: an exact match outranks a substring hit.
    file_no = (ledger.file_no or "").lower()
    if needle == file_no:
        score += 8.0
    elif needle in file_no:
        score += 4.0

    # Transaction code (exact only).
    if needle == (ledger.t_code or "").lower():
        score += 6.0

    # Free-text note.
    if ledger.note and needle in ledger.note.lower():
        score += 3.0

    return max(score, 0.1)
|
|
|
|
|
|
def _calculate_qdro_relevance(qdro: QDRO, query: str) -> float:
    """Calculate relevance score for QDRO.

    Scoring: exact form-name match +10 (substring +5), petitioner or
    respondent substring +6 each, case-number substring +4. An empty query
    scores 1.0; any non-empty query scores at least 0.1.
    """
    if not query:
        return 1.0

    needle = query.lower()
    score = 0.0

    # Form name: an exact match outranks a substring hit.
    if qdro.form_name:
        form_name = qdro.form_name.lower()
        if needle == form_name:
            score += 10.0
        elif needle in form_name:
            score += 5.0

    # Either party name is a strong signal.
    for party in (qdro.pet, qdro.res):
        if party and needle in party.lower():
            score += 6.0

    # Case number.
    if qdro.case_number and needle in qdro.case_number.lower():
        score += 4.0

    return max(score, 0.1)
|
|
|
|
|
|
def _calculate_document_relevance(doc: FormIndex, query: str) -> float:
    """Calculate relevance score for document.

    Scoring: exact form-ID match +10, form-name substring +5, category
    substring +3. An empty query scores 1.0; any non-empty query scores at
    least 0.1.
    """
    if not query:
        return 1.0

    needle = query.lower()

    # Weighted checks: (matched?, weight).
    checks = (
        (needle == (doc.form_id or "").lower(), 10.0),
        (bool(doc.form_name) and needle in (doc.form_name or "").lower(), 5.0),
        (bool(doc.category) and needle in (doc.category or "").lower(), 3.0),
    )
    score = sum(weight for matched, weight in checks if matched)

    return max(score, 0.1)
|
|
|
|
|
|
# Highlight functions
|
|
def _create_customer_highlight(customer: Rolodex, query: str) -> str:
    # Thin local alias for the shared highlighter in app.api.search_highlight.
    return create_customer_highlight(customer, query)
|
|
|
|
|
|
def _create_file_highlight(file_obj: File, query: str) -> str:
    # Thin local alias for the shared highlighter in app.api.search_highlight.
    return create_file_highlight(file_obj, query)
|
|
|
|
|
|
def _create_ledger_highlight(ledger: Ledger, query: str) -> str:
    # Thin local alias for the shared highlighter in app.api.search_highlight.
    return create_ledger_highlight(ledger, query)
|
|
|
|
|
|
def _create_qdro_highlight(qdro: QDRO, query: str) -> str:
    # Thin local alias for the shared highlighter in app.api.search_highlight.
    return create_qdro_highlight(qdro, query)