fixes and refactor

This commit is contained in:
HotSwapp
2025-08-14 19:16:28 -05:00
parent 5111079149
commit bfc04a6909
61 changed files with 5689 additions and 767 deletions

View File

@@ -3,7 +3,7 @@ Audit logging service
"""
import json
from typing import Dict, Any, Optional
from datetime import datetime, timedelta
from datetime import datetime, timedelta, timezone
from sqlalchemy.orm import Session
from fastapi import Request
@@ -65,7 +65,7 @@ class AuditService:
details=details,
ip_address=ip_address,
user_agent=user_agent,
timestamp=datetime.utcnow()
timestamp=datetime.now(timezone.utc)
)
try:
@@ -76,7 +76,7 @@ class AuditService:
except Exception as e:
db.rollback()
# Log the error but don't fail the main operation
logger.error("Failed to log audit entry", error=str(e), action=action, user_id=user_id)
logger.error("Failed to log audit entry", error=str(e), action=action)
return audit_log
@staticmethod
@@ -119,7 +119,7 @@ class AuditService:
ip_address=ip_address or "unknown",
user_agent=user_agent,
success=1 if success else 0,
timestamp=datetime.utcnow(),
timestamp=datetime.now(timezone.utc),
failure_reason=failure_reason if not success else None
)
@@ -252,7 +252,7 @@ class AuditService:
Returns:
List of failed login attempts
"""
cutoff_time = datetime.utcnow() - timedelta(hours=hours)
cutoff_time = datetime.now(timezone.utc) - timedelta(hours=hours)
query = db.query(LoginAttempt).filter(
LoginAttempt.success == 0,
LoginAttempt.timestamp >= cutoff_time

98
app/services/cache.py Normal file
View File

@@ -0,0 +1,98 @@
"""
Cache utilities with optional Redis backend.
If Redis is not configured or unavailable, all functions degrade to no-ops.
"""
from __future__ import annotations
import asyncio
import json
import hashlib
from typing import Any, Optional
try:
import redis.asyncio as redis # type: ignore
except Exception: # pragma: no cover - allow running without redis installed
redis = None # type: ignore
from app.config import settings
# Process-wide Redis client, created lazily by _get_client(); stays None when
# caching is disabled, the redis package is missing, or construction failed.
_client: Optional["redis.Redis"] = None  # type: ignore
# Serializes first-time client construction across concurrent coroutines.
_lock = asyncio.Lock()
async def _get_client() -> Optional["redis.Redis"]:  # type: ignore
    """Return the shared Redis client, creating it on first use.

    Returns None when caching is disabled in settings, when the redis
    package is unavailable, or when client construction raised.
    """
    global _client
    url = getattr(settings, "redis_url", None)
    enabled = getattr(settings, "cache_enabled", False)
    if not url or not enabled:
        return None
    if redis is None:
        return None
    if _client is None:
        # Double-checked initialization: re-test under the lock so only
        # one coroutine builds the client.
        async with _lock:
            if _client is None:
                try:
                    _client = redis.from_url(settings.redis_url, decode_responses=True)  # type: ignore
                except Exception:
                    _client = None
    return _client
def _stable_hash(obj: Any) -> str:
data = json.dumps(obj, sort_keys=True, separators=(",", ":"))
return hashlib.sha1(data.encode("utf-8")).hexdigest()
def build_key(kind: str, user_id: Optional[str], parts: dict) -> str:
payload = {"u": user_id or "anon", "p": parts}
return f"search:{kind}:v1:{_stable_hash(payload)}"
async def cache_get_json(kind: str, user_id: Optional[str], parts: dict) -> Optional[Any]:
    """Fetch and decode a cached JSON value; None on miss or any failure.

    Degrades to a no-op (returns None) when no Redis client is available.
    """
    client = await _get_client()
    if client is None:
        return None
    key = build_key(kind, user_id, parts)
    try:
        cached = await client.get(key)
        return None if cached is None else json.loads(cached)
    except Exception:
        # Treat any Redis/JSON error as a cache miss.
        return None
async def cache_set_json(kind: str, user_id: Optional[str], parts: dict, value: Any, ttl_seconds: int) -> None:
    """Best-effort store of a JSON-encodable value under a TTL.

    Silently does nothing when no Redis client is available or the write
    fails; caching must never break the caller.
    """
    client = await _get_client()
    if client is None:
        return
    key = build_key(kind, user_id, parts)
    try:
        encoded = json.dumps(value, separators=(",", ":"))
        await client.set(key, encoded, ex=ttl_seconds)
    except Exception:
        pass
async def invalidate_prefix(prefix: str) -> None:
    """Best-effort deletion of every cache key starting with *prefix*.

    Uses SCAN (non-blocking, unlike KEYS) to enumerate matches, and
    deletes in batches so a large namespace costs a handful of round
    trips instead of one DEL per key. All Redis errors are swallowed:
    invalidation must never break the caller.
    """
    client = await _get_client()
    if client is None:
        return
    batch: list = []
    try:
        async for key in client.scan_iter(match=f"{prefix}*"):
            batch.append(key)
            if len(batch) >= 500:
                try:
                    # Multi-key DEL: one round trip for the whole batch.
                    await client.delete(*batch)
                except Exception:
                    pass  # best-effort: keep scanning even if a batch fails
                batch = []
        if batch:
            try:
                await client.delete(*batch)
            except Exception:
                pass
    except Exception:
        return
async def invalidate_search_cache() -> None:
    """Drop every cached global-search and suggestions entry."""
    for namespace in ("search:global:", "search:suggestions:"):
        await invalidate_prefix(namespace)

View File

@@ -0,0 +1,141 @@
from typing import Optional, List
from sqlalchemy import or_, and_, func, asc, desc
from app.models.rolodex import Rolodex
def apply_customer_filters(base_query, search: Optional[str], group: Optional[str], state: Optional[str], groups: Optional[List[str]], states: Optional[List[str]]):
    """Apply shared search and group/state filters to the provided base_query.

    This helper is used by both list and export endpoints to keep logic in sync.

    All text matching is case-insensitive substring matching. A row is
    selected when ANY of these alternatives matches:
      - the whole search string appears in id, last, first, middle, city
        or email;
      - every whitespace-separated token appears in at least one of
        first/middle/last (AND across tokens), so "john smith" matches
        John Smith in either word order;
      - when the search contains a comma, it is treated as "Last, First"
        (or just "Last," when nothing follows the comma).

    `group`/`state` are legacy single-value filters; non-empty
    `groups`/`states` lists take precedence over them.
    """
    s = (search or "").strip()
    if s:
        s_lower = s.lower()
        tokens = [t for t in s_lower.split() if t]
        # Whole-string substring match against the main identity columns.
        contains_any = or_(
            func.lower(Rolodex.id).contains(s_lower),
            func.lower(Rolodex.last).contains(s_lower),
            func.lower(Rolodex.first).contains(s_lower),
            func.lower(Rolodex.middle).contains(s_lower),
            func.lower(Rolodex.city).contains(s_lower),
            func.lower(Rolodex.email).contains(s_lower),
        )
        # Per-token AND-of-ORs over the name columns, for multi-word names.
        name_tokens = [
            or_(
                func.lower(Rolodex.first).contains(tok),
                func.lower(Rolodex.middle).contains(tok),
                func.lower(Rolodex.last).contains(tok),
            )
            for tok in tokens
        ]
        combined = contains_any if not name_tokens else or_(contains_any, and_(*name_tokens))
        # "Last, First" convention: split on the first comma and match the
        # halves against last/first specifically.
        last_first_filter = None
        if "," in s_lower:
            last_part, first_part = [p.strip() for p in s_lower.split(",", 1)]
            if last_part and first_part:
                last_first_filter = and_(
                    func.lower(Rolodex.last).contains(last_part),
                    func.lower(Rolodex.first).contains(first_part),
                )
            elif last_part:
                last_first_filter = func.lower(Rolodex.last).contains(last_part)
        final_filter = or_(combined, last_first_filter) if last_first_filter is not None else combined
        base_query = base_query.filter(final_filter)
    # Multi-select lists win over the legacy single-value parameters.
    effective_groups = [g for g in (groups or []) if g] or ([group] if group else [])
    if effective_groups:
        base_query = base_query.filter(Rolodex.group.in_(effective_groups))
    effective_states = [s for s in (states or []) if s] or ([state] if state else [])
    if effective_states:
        base_query = base_query.filter(Rolodex.abrev.in_(effective_states))
    return base_query
def apply_customer_sorting(base_query, sort_by: Optional[str], sort_dir: Optional[str]):
    """Apply shared sorting to the provided base_query.

    Supported fields: id, name (last, first), city (city, state), email.
    Unknown fields fall back to id. String columns are compared
    case-insensitively via lower().
    """
    field = (sort_by or "id").lower()
    descending = (sort_dir or "asc").lower() == "desc"
    # Whitelisted sort fields -> columns in priority order.
    column_map = {
        "id": [Rolodex.id],
        "name": [Rolodex.last, Rolodex.first],
        "city": [Rolodex.city, Rolodex.abrev],
        "email": [Rolodex.email],
    }
    columns = column_map.get(field, [Rolodex.id])
    order_exprs = []
    for column in columns:
        try:
            # lower() only for string columns; python_type may raise for
            # exotic types, in which case sort on the raw column.
            expr = func.lower(column) if column.type.python_type is str else column  # type: ignore[attr-defined]
        except Exception:
            expr = column
        order_exprs.append(desc(expr) if descending else asc(expr))
    if order_exprs:
        base_query = base_query.order_by(*order_exprs)
    return base_query
def prepare_customer_csv_rows(customers: List[Rolodex], fields: Optional[List[str]]):
    """Prepare CSV header and rows for the given customers and requested fields.

    Returns (header_row, rows): header_row is the list of column titles
    and rows is a list of row lists ready for csv.writer. Requested
    fields are filtered against a whitelist and emitted in canonical
    order; an empty or entirely-invalid request falls back to all fields.
    """
    field_order = ["id", "name", "group", "city", "state", "phone", "email"]
    titles = {
        "id": "Customer ID",
        "name": "Name",
        "group": "Group",
        "city": "City",
        "state": "State",
        "phone": "Primary Phone",
        "email": "Email",
    }
    wanted = [f.lower() for f in (fields or []) if isinstance(f, str)]
    selected = [f for f in field_order if f in wanted] if wanted else list(field_order)
    if not selected:
        selected = list(field_order)
    rows: List[List[str]] = []
    for customer in customers:
        full_name = f"{(customer.first or '').strip()} {(customer.last or '').strip()}".strip()
        phone = ""
        try:
            # First phone number on the record, when the relationship is
            # loaded and non-empty; otherwise leave blank.
            if getattr(customer, "phone_numbers", None):
                phone = customer.phone_numbers[0].phone or ""
        except Exception:
            phone = ""
        values = {
            "id": customer.id,
            "name": full_name,
            "group": customer.group or "",
            "city": customer.city or "",
            "state": customer.abrev or "",
            "phone": phone,
            "email": customer.email or "",
        }
        rows.append([values[f] for f in selected])
    return [titles[f] for f in selected], rows

127
app/services/mortality.py Normal file
View File

@@ -0,0 +1,127 @@
"""
Mortality/Life table utilities.
Helpers to query `life_tables` and `number_tables` by age/month and
return values filtered by sex/race using compact codes:
- sex: M, F, A (All)
- race: W (White), B (Black), H (Hispanic), A (All)
Column naming in tables follows the pattern:
- LifeTable: le_{race}{sex}, na_{race}{sex}
- NumberTable: na_{race}{sex}
Examples:
- race=W, sex=M => suffix "wm" (columns `le_wm`, `na_wm`)
- race=A, sex=F => suffix "af" (columns `le_af`, `na_af`)
- race=H, sex=A => suffix "ha" (columns `le_ha`, `na_ha`)
"""
from __future__ import annotations
from typing import Dict, Optional, Tuple
from sqlalchemy.orm import Session
from app.models.pensions import LifeTable, NumberTable
# Compact demographic codes -> lowercase column-suffix letters used to build
# life/number table attribute names (e.g. race "W" + sex "M" -> suffix "wm").
_RACE_MAP: Dict[str, str] = {
    "W": "w",  # White
    "B": "b",  # Black
    "H": "h",  # Hispanic
    "A": "a",  # All races
}

_SEX_MAP: Dict[str, str] = {
    "M": "m",  # Male
    "F": "f",  # Female
    "A": "a",  # All sexes
}
class InvalidCodeError(ValueError):
    """Raised when a sex or race code is not one of the supported values."""
def _normalize_codes(sex: str, race: str) -> Tuple[str, str, str]:
    """Validate/normalize sex and race to construct the column suffix.

    Returns (suffix, sex_u, race_u) where suffix is lowercase like "wm".
    Raises InvalidCodeError on invalid inputs. None and surrounding
    whitespace are tolerated; codes are upper-cased before lookup.
    """
    sex_u = (sex or "").strip().upper()
    race_u = (race or "").strip().upper()
    if sex_u not in _SEX_MAP:
        valid = ", ".join(_SEX_MAP.keys())
        raise InvalidCodeError(f"Invalid sex code '{sex}'. Expected one of: {valid}")
    if race_u not in _RACE_MAP:
        valid = ", ".join(_RACE_MAP.keys())
        raise InvalidCodeError(f"Invalid race code '{race}'. Expected one of: {valid}")
    # Suffix is race letter + sex letter, matching le_/na_ column names.
    return _RACE_MAP[race_u] + _SEX_MAP[sex_u], sex_u, race_u
def get_life_values(
    db: Session,
    *,
    age: int,
    sex: str,
    race: str,
) -> Optional[Dict[str, Optional[float]]]:
    """Return life table LE and NA values for a given age, sex, and race.

    Returns dict: {"age": int, "sex": str, "race": str, "le": float|None,
    "na": float|None}. Returns None if the age row does not exist.
    Raises InvalidCodeError for invalid codes.
    """
    suffix, sex_u, race_u = _normalize_codes(sex, race)
    record: Optional[LifeTable] = db.query(LifeTable).filter(LifeTable.age == age).first()
    if not record:
        return None
    # Column names follow the le_{suffix}/na_{suffix} pattern; missing
    # attributes read as None.
    le_val = getattr(record, f"le_{suffix}", None)
    na_val = getattr(record, f"na_{suffix}", None)
    return {
        "age": int(age),
        "sex": sex_u,
        "race": race_u,
        "le": None if le_val is None else float(le_val),
        "na": None if na_val is None else float(na_val),
    }
def get_number_value(
    db: Session,
    *,
    month: int,
    sex: str,
    race: str,
) -> Optional[Dict[str, Optional[float]]]:
    """Return number table NA value for a given month, sex, and race.

    Returns dict: {"month": int, "sex": str, "race": str, "na": float|None}.
    Returns None if the month row does not exist.
    Raises InvalidCodeError for invalid codes.
    """
    suffix, sex_u, race_u = _normalize_codes(sex, race)
    record: Optional[NumberTable] = db.query(NumberTable).filter(NumberTable.month == month).first()
    if not record:
        return None
    # Column name follows the na_{suffix} pattern; missing reads as None.
    na_val = getattr(record, f"na_{suffix}", None)
    return {
        "month": int(month),
        "sex": sex_u,
        "race": race_u,
        "na": None if na_val is None else float(na_val),
    }
# Public API of this module; internal helpers stay unexported.
__all__ = [
    "InvalidCodeError",
    "get_life_values",
    "get_number_value",
]

View File

@@ -0,0 +1,72 @@
from typing import Iterable, Optional, Sequence
from sqlalchemy import or_, and_, asc, desc, func
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy.sql.schema import Column
def tokenized_ilike_filter(tokens: Sequence[str], columns: Sequence[Column]) -> Optional[BinaryExpression]:
    """Build an AND-of-ORs case-insensitive LIKE filter across columns for each token.

    Example: AND(OR(col1 ILIKE %t1%, col2 ILIKE %t1%), OR(col1 ILIKE %t2%, ...))
    Returns None when tokens or columns are empty, or when every token is
    blank after stripping.
    """
    if not tokens or not columns:
        return None
    clauses = []
    for raw in tokens:
        token = str(raw or "").strip()
        if not token:
            continue  # blank tokens contribute no constraint
        clauses.append(or_(*(col.ilike(f"%{token}%") for col in columns)))
    return and_(*clauses) if clauses else None
def apply_pagination(query, skip: int, limit: int):
    """Apply offset/limit pagination to a SQLAlchemy query in a DRY way."""
    offset_applied = query.offset(skip)
    return offset_applied.limit(limit)


def paginate_with_total(query, skip: int, limit: int, include_total: bool):
    """Return (items, total|None), paginating and optionally counting.

    The count runs against the un-paginated query, so the total reflects
    every matching row rather than just the current page. Avoids
    duplicating count + pagination logic at each endpoint.
    """
    total = query.count() if include_total else None
    page_items = apply_pagination(query, skip, limit).all()
    return page_items, total
def apply_sorting(query, sort_by: Optional[str], sort_dir: Optional[str], allowed: dict[str, list[Column]]):
    """Apply case-insensitive sorting per a whitelist of allowed fields.

    allowed: mapping from field name -> list of columns to sort by, in
    priority order. String columns compare via lower(column) for stable
    ordering. Unknown sort_by falls back to the first key in allowed.
    sort_dir: "asc" or "desc" (default asc).
    """
    if not allowed:
        return query
    default_field = next(iter(allowed.keys()))
    field = (sort_by or default_field).lower()
    descending = (sort_dir or "asc").lower() == "desc"
    # Unknown or empty entries fall back to the first whitelisted field.
    columns = allowed.get(field) or allowed.get(default_field)
    if not columns:
        return query
    exprs = []
    for column in columns:
        try:
            # python_type may raise for exotic column types; sort raw then.
            is_string = getattr(column.type, "python_type", str) is str
            expr = func.lower(column) if is_string else column
        except Exception:
            expr = column
        exprs.append(desc(expr) if descending else asc(expr))
    if exprs:
        query = query.order_by(*exprs)
    return query