fixed sort time

This commit is contained in:
HotSwapp
2025-10-14 07:56:13 -05:00
parent 9b2ce0d28f
commit 65e4995a5b
26 changed files with 99601 additions and 28 deletions

Binary file not shown.

View File

@@ -147,6 +147,37 @@ def create_tables() -> None:
# Handle case where auth module isn't available yet during initial import
pass
# Create helpful SQLite indexes for rolodex sorting if they do not exist
try:
if "sqlite" in DATABASE_URL:
index_ddls = [
# Name sort: NULLS LAST emulation — NULL-check terms first, then the values
"CREATE INDEX IF NOT EXISTS ix_clients_name_sort ON clients((last_name IS NULL), last_name, (first_name IS NULL), first_name)",
# Company/address/city/state/zip
"CREATE INDEX IF NOT EXISTS ix_clients_company_sort ON clients((company IS NULL), company)",
"CREATE INDEX IF NOT EXISTS ix_clients_address_sort ON clients((address IS NULL), address)",
"CREATE INDEX IF NOT EXISTS ix_clients_city_sort ON clients((city IS NULL), city)",
"CREATE INDEX IF NOT EXISTS ix_clients_state_sort ON clients((state IS NULL), state)",
"CREATE INDEX IF NOT EXISTS ix_clients_zip_sort ON clients((zip_code IS NULL), zip_code)",
# Updated sort via COALESCE(updated_at, created_at)
"CREATE INDEX IF NOT EXISTS ix_clients_updated_sort ON clients(COALESCE(updated_at, created_at))",
# Phone MIN(phone_number) correlated subquery helper
"CREATE INDEX IF NOT EXISTS ix_phones_client_phone ON phones(client_id, phone_number)",
]
with engine.begin() as conn:
for ddl in index_ddls:
conn.execute(text(ddl))
except Exception as e:
try:
from .logging_config import setup_logging
import structlog
setup_logging()
_logger = structlog.get_logger(__name__)
_logger.warning("sqlite_index_creation_failed", error=str(e))
except Exception:
pass
def get_database_url() -> str:
"""

View File

@@ -41,11 +41,27 @@ def open_text_with_fallbacks(file_path: str):
last_error = None
for enc in encodings:
try:
f = open(file_path, 'r', encoding=enc, errors='strict', newline='')
# Read more than 1KB to catch encoding issues deeper in the file
# Many legacy CSVs have issues beyond the first few rows
_ = f.read(51200) # Read 50KB to test (increased from 20KB)
f.seek(0)
# First open in strict mode just for a quick sanity check on the first
# chunk of the file. We do *not* keep this handle because a later
# unexpected character could still trigger a UnicodeDecodeError when
# the CSV iterator continues reading. After the quick check we
# immediately close the handle and reopen with `errors="replace"`
# which guarantees that *any* undecodable bytes that appear further
# down will be replaced with the official Unicode replacement
# character (U+FFFD) instead of raising an exception and aborting the
# import. This keeps the import pipeline resilient while still
# letting us log the originally detected encoding for auditing.
test_f = open(file_path, 'r', encoding=enc, errors='strict', newline='')
# Read 50 KB from the start of the file — enough to catch the vast
# majority of encoding problems without loading the entire file into
# memory.
_ = test_f.read(51200)
test_f.close()
# Re-open for the real CSV processing pass using a forgiving error
# strategy.
f = open(file_path, 'r', encoding=enc, errors='replace', newline='')
logger.info("csv_open_encoding_selected", file=file_path, encoding=enc)
return f, enc
except Exception as e:
@@ -124,10 +140,25 @@ def parse_decimal(value: str) -> Optional[Decimal]:
def clean_string(value: str) -> Optional[str]:
"""Clean string value, return None if blank."""
if not value or not value.strip():
"""Return a sanitized string or None if blank/only junk.
• Strips leading/trailing whitespace
• Removes Unicode replacement characters (U+FFFD) introduced by our
liberal decoder
• Removes ASCII control characters (0x00-0x1F, 0x7F)
"""
if not value:
return None
return value.strip()
# Remove replacement chars created by errors="replace" decoding
cleaned = value.replace("", "").replace("\uFFFD", "")
# Strip out remaining control chars
cleaned = "".join(ch for ch in cleaned if ch >= " " and ch != "\x7f")
cleaned = cleaned.strip()
return cleaned or None
# ============================================================================
@@ -1522,15 +1553,51 @@ def import_planinfo(db: Session, file_path: str) -> Dict[str, Any]:
f, encoding = open_text_with_fallbacks(file_path)
reader = csv.DictReader(f)
batch = []
# Fetch once to avoid many round-trips
existing_ids: set[str] = {
pid for (pid,) in db.query(PlanInfo.plan_id).all()
}
batch: list[PlanInfo] = []
updating: list[PlanInfo] = []
for row_num, row in enumerate(reader, start=2):
result['total_rows'] += 1
try:
plan_id = clean_string(row.get('Plan_Id'))
# Skip rows where plan_id is missing or clearly corrupted (contains replacement character)
if not plan_id:
# Record as warning so user can review later
result['errors'].append(
f"Row {row_num}: skipped due to invalid plan_id '{plan_id}'"
)
continue
if plan_id in existing_ids:
# Update existing record in place (UPSERT)
rec: PlanInfo = db.query(PlanInfo).filter_by(plan_id=plan_id).first()
if rec:
rec.plan_name = clean_string(row.get('Plan_Name'))
rec.plan_type = clean_string(row.get('Plan_Type'))
rec.empl_id_no = clean_string(row.get('Empl_Id_No'))
rec.plan_no = clean_string(row.get('Plan_No'))
rec.nra = clean_string(row.get('NRA'))
rec.era = clean_string(row.get('ERA'))
rec.errf = clean_string(row.get('ERRF'))
rec.colas = clean_string(row.get('COLAS'))
rec.divided_by = clean_string(row.get('Divided_By'))
rec.drafted = clean_string(row.get('Drafted'))
rec.benefit_c = clean_string(row.get('Benefit_C'))
rec.qdro_c = clean_string(row.get('QDRO_C'))
rec.rev = clean_string(row.get('^REV'))
rec.pa = clean_string(row.get('^PA'))
rec.form_name = clean_string(row.get('Form_Name'))
rec.drafted_on = parse_date(row.get('Drafted_On'))
rec.memo = clean_string(row.get('Memo'))
updating.append(rec)
continue
record = PlanInfo(
plan_id=plan_id,
plan_name=clean_string(row.get('Plan_Name')),
@@ -1552,6 +1619,9 @@ def import_planinfo(db: Session, file_path: str) -> Dict[str, Any]:
memo=clean_string(row.get('Memo'))
)
batch.append(record)
# Track to prevent duplicates within same import
existing_ids.add(plan_id)
if len(batch) >= BATCH_SIZE:
db.bulk_save_objects(batch)
@@ -1562,6 +1632,10 @@ def import_planinfo(db: Session, file_path: str) -> Dict[str, Any]:
except Exception as e:
result['errors'].append(f"Row {row_num}: {str(e)}")
# First flush updates if any
if updating:
db.commit()
if batch:
db.bulk_save_objects(batch)
db.commit()

View File

@@ -21,7 +21,7 @@ from starlette.middleware.sessions import SessionMiddleware
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session, joinedload
from sqlalchemy.orm import Session, selectinload, joinedload
from sqlalchemy import or_, and_, func as sa_func, select
from dotenv import load_dotenv
from starlette.middleware.base import BaseHTTPMiddleware
@@ -2709,8 +2709,8 @@ async def rolodex_list(
request.session["rolodex_sort"] = {"key": chosen_sort_key, "direction": chosen_sort_dir}
# Eager-load phones to avoid N+1 in template
query = db.query(Client).options(joinedload(Client.phones))
# Eager-load phones to avoid N+1 in template; use selectinload to avoid join explosion
query = db.query(Client).options(selectinload(Client.phones))
if q:
like = f"%{q}%"
@@ -2782,7 +2782,8 @@ async def rolodex_list(
query = query.order_by(*order_map[chosen_sort_key][chosen_sort_dir])
total: int = query.count()
# Count without ORDER BY for performance on SQLite
total: int = query.order_by(None).count()
total_pages: int = (total + page_size - 1) // page_size if total > 0 else 1
if page > total_pages:
page = total_pages

View File

@@ -19,6 +19,8 @@
</div>
<div class="col-auto">
<input type="hidden" name="page_size" value="{{ page_size }}">
<input type="hidden" name="sort_key" value="{{ sort_key }}">
<input type="hidden" name="sort_dir" value="{{ sort_dir }}">
<button class="btn btn-outline-primary" type="submit">
<i class="bi bi-search me-1"></i>Search
</button>
@@ -28,6 +30,26 @@
<i class="bi bi-x-circle me-1"></i>Clear
</a>
</div>
<div class="col-auto">
<div class="btn-group" role="group" aria-label="Sort">
<button type="button" class="btn btn-outline-secondary dropdown-toggle d-inline-flex align-items-center gap-1" data-bs-toggle="dropdown" aria-expanded="false">
<i class="bi bi-arrow-down-up"></i>
<span>{{ sort_labels[sort_key] if sort_labels and sort_key in sort_labels else 'Sort' }}</span>
</button>
<ul class="dropdown-menu">
{% for key, label in sort_labels.items() %}
<li>
<a class="dropdown-item d-flex justify-content-between align-items-center js-sort-option" href="#" data-sort-key="{{ key }}">
<span>{{ label }}</span>
{% if sort_key == key %}
<i class="bi bi-check"></i>
{% endif %}
</a>
</li>
{% endfor %}
</ul>
</div>
</div>
<div class="col-auto">
<a class="btn btn-primary" href="/rolodex/new">
<i class="bi bi-plus-lg me-1"></i>New Client
@@ -39,31 +61,40 @@
<div class="col-12">
<div class="table-responsive">
{% set headers = [
{ 'title': 'Name', 'width': '220px' },
{ 'title': 'Company' },
{ 'title': 'Address' },
{ 'title': 'City' },
{ 'title': 'State', 'width': '80px' },
{ 'title': 'ZIP', 'width': '110px' },
{ 'title': 'Phones', 'width': '200px' },
{ 'title': 'Name', 'width': '220px', 'key': 'name' },
{ 'title': 'Company', 'key': 'company' },
{ 'title': 'Address', 'key': 'address' },
{ 'title': 'City', 'key': 'city' },
{ 'title': 'State', 'width': '80px', 'key': 'state' },
{ 'title': 'ZIP', 'width': '110px', 'key': 'zip' },
{ 'title': 'Phones', 'width': '200px', 'key': 'phones' },
{ 'title': 'Actions', 'width': '140px', 'align': 'end' },
] %}
<form method="post" action="/reports/phone-book" class="js-answer-table">
<table class="table table-hover align-middle">
<table class="table table-hover align-middle js-rolodex-table" data-sort-key="{{ sort_key }}" data-sort-dir="{{ sort_dir }}">
<thead class="table-light">
<tr>
{% if enable_bulk %}
<th style="width: 40px;"><input class="form-check-input js-select-all" type="checkbox"></th>
{% endif %}
{% for h in headers %}
<th{% if h.width %} width="{{ h.width | replace('px', '') }}"{% endif %}{% if h.align == 'end' %} class="text-end"{% endif %}>{{ h.title }}</th>
<th{% if h.width %} width="{{ h.width | replace('px', '') }}"{% endif %}{% if h.align == 'end' %} class="text-end"{% endif %}>
{% if h.key %}
<button type="button" class="btn btn-link p-0 text-decoration-none text-reset d-inline-flex align-items-center gap-1 js-sort-control" data-sort-key="{{ h.key }}">
<span>{{ h.title }}</span>
<i class="sort-icon small {% if sort_key == h.key %}{% if sort_dir == 'desc' %}bi-caret-down-fill{% else %}bi-caret-up-fill{% endif %}{% else %}bi-arrow-down-up{% endif %}"></i>
</button>
{% else %}
{{ h.title }}
{% endif %}
</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% if clients and clients|length > 0 %}
{% for c in clients %}
<tr>
<tr data-updated="{{ (c.updated_at or c.created_at).isoformat() if (c.updated_at or c.created_at) else '' }}">
{% if enable_bulk %}
<td><input class="form-check-input" type="checkbox" name="client_ids" value="{{ c.id }}"></td>
{% endif %}
@@ -90,7 +121,7 @@
</tr>
{% endfor %}
{% else %}
<tr>
<tr data-empty-state="true">
<td colspan="8" class="text-center text-muted py-4">
No clients found.
<div class="small mt-1">
@@ -125,10 +156,85 @@
</div>
</div>
<div class="col-12">
{{ pagination('/rolodex', page, total_pages, page_size, {'q': q, 'phone': phone}) }}
{{ pagination('/rolodex', page, total_pages, page_size, {'q': q, 'phone': phone, 'sort_key': sort_key, 'sort_dir': sort_dir}) }}
</div>
</div>
{% block extra_scripts %}{% endblock %}
{% block extra_scripts %}
{{ super() }}
<script>
document.addEventListener('DOMContentLoaded', () => {
const table = document.querySelector('.js-rolodex-table');
if (!table) {
return;
}
const controls = document.querySelectorAll('.js-sort-control');
const menuOptions = document.querySelectorAll('.js-sort-option');
const defaultDirection = (key) => (key === 'updated' ? 'desc' : 'asc');
let currentKey = table.dataset.sortKey || null;
let currentDir = table.dataset.sortDir || null;
const updateIndicators = (activeKey, direction) => {
const normalizedDirection = direction === 'desc' ? 'desc' : 'asc';
controls.forEach((control) => {
const icon = control.querySelector('.sort-icon');
if (!icon) {
return;
}
icon.classList.remove('bi-caret-up-fill', 'bi-caret-down-fill');
if (control.dataset.sortKey === activeKey) {
icon.classList.remove('bi-arrow-down-up');
icon.classList.add(normalizedDirection === 'desc' ? 'bi-caret-down-fill' : 'bi-caret-up-fill');
} else {
icon.classList.add('bi-arrow-down-up');
}
});
};
updateIndicators(currentKey, currentDir);
controls.forEach((control) => {
control.addEventListener('click', () => {
const key = control.dataset.sortKey;
if (!key) {
return;
}
const nextDirection = currentKey === key
? (currentDir === 'asc' ? 'desc' : 'asc')
: defaultDirection(key);
const url = new URL(window.location.href);
url.searchParams.set('sort_key', key);
url.searchParams.set('sort_dir', nextDirection);
url.searchParams.set('page', '1');
window.location.href = url.toString();
});
});
menuOptions.forEach((option) => {
option.addEventListener('click', (event) => {
event.preventDefault();
const key = option.dataset.sortKey;
if (!key) {
return;
}
const nextDirection = currentKey === key
? (currentDir === 'asc' ? 'desc' : 'asc')
: defaultDirection(key);
const url = new URL(window.location.href);
url.searchParams.set('sort_key', key);
url.searchParams.set('sort_dir', nextDirection);
url.searchParams.set('page', '1');
window.location.href = url.toString();
});
});
});
</script>
{% endblock %}
{% endblock %}