coming together
@@ -1,10 +1,13 @@
"""
Customer (Rolodex) API endpoints
"""
from typing import List, Optional
from typing import List, Optional, Union
from fastapi import APIRouter, Depends, HTTPException, status, Query
from sqlalchemy.orm import Session, joinedload
from sqlalchemy import or_, func
from sqlalchemy import or_, and_, func, asc, desc
from fastapi.responses import StreamingResponse
import csv
import io

from app.database.base import get_db
from app.models.rolodex import Rolodex, Phone
@@ -169,36 +172,263 @@ async def get_customer_stats(
}


@router.get("/", response_model=List[CustomerResponse])
class PaginatedCustomersResponse(BaseModel):
items: List[CustomerResponse]
total: int


@router.get("/", response_model=Union[List[CustomerResponse], PaginatedCustomersResponse])
async def list_customers(
skip: int = Query(0, ge=0),
limit: int = Query(50, ge=1, le=200),
search: Optional[str] = Query(None),
group: Optional[str] = Query(None, description="Filter by customer group (exact match)"),
state: Optional[str] = Query(None, description="Filter by state abbreviation (exact match)"),
groups: Optional[List[str]] = Query(None, description="Filter by multiple groups (repeat param)"),
states: Optional[List[str]] = Query(None, description="Filter by multiple states (repeat param)"),
sort_by: Optional[str] = Query("id", description="Sort field: id, name, city, email"),
sort_dir: Optional[str] = Query("asc", description="Sort direction: asc or desc"),
include_total: bool = Query(False, description="When true, returns {items, total} instead of a plain list"),
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user)
):
"""List customers with pagination and search"""
try:
query = db.query(Rolodex).options(joinedload(Rolodex.phone_numbers))
base_query = db.query(Rolodex)

if search:
query = query.filter(
or_(
Rolodex.id.contains(search),
Rolodex.last.contains(search),
Rolodex.first.contains(search),
Rolodex.city.contains(search),
Rolodex.email.contains(search)
)
s = (search or "").strip()
s_lower = s.lower()
tokens = [t for t in s_lower.split() if t]
# Basic contains search on several fields (case-insensitive)
contains_any = or_(
func.lower(Rolodex.id).contains(s_lower),
func.lower(Rolodex.last).contains(s_lower),
func.lower(Rolodex.first).contains(s_lower),
func.lower(Rolodex.middle).contains(s_lower),
func.lower(Rolodex.city).contains(s_lower),
func.lower(Rolodex.email).contains(s_lower),
)

customers = query.offset(skip).limit(limit).all()
# Multi-token name support: every token must match either first, middle, or last
name_tokens = [
or_(
func.lower(Rolodex.first).contains(tok),
func.lower(Rolodex.middle).contains(tok),
func.lower(Rolodex.last).contains(tok),
)
for tok in tokens
]
combined = contains_any if not name_tokens else or_(contains_any, and_(*name_tokens))
# Comma pattern: "Last, First"
last_first_filter = None
if "," in s_lower:
last_part, first_part = [p.strip() for p in s_lower.split(",", 1)]
if last_part and first_part:
last_first_filter = and_(
func.lower(Rolodex.last).contains(last_part),
func.lower(Rolodex.first).contains(first_part),
)
elif last_part:
last_first_filter = func.lower(Rolodex.last).contains(last_part)
final_filter = or_(combined, last_first_filter) if last_first_filter is not None else combined
base_query = base_query.filter(final_filter)

# Apply group/state filters (support single and multi-select)
effective_groups = [g for g in (groups or []) if g] or ([group] if group else [])
if effective_groups:
base_query = base_query.filter(Rolodex.group.in_(effective_groups))
effective_states = [s for s in (states or []) if s] or ([state] if state else [])
if effective_states:
base_query = base_query.filter(Rolodex.abrev.in_(effective_states))

# Apply sorting (whitelisted fields only)
normalized_sort_by = (sort_by or "id").lower()
normalized_sort_dir = (sort_dir or "asc").lower()
is_desc = normalized_sort_dir == "desc"

order_columns = []
if normalized_sort_by == "id":
order_columns = [Rolodex.id]
elif normalized_sort_by == "name":
# Sort by last, then first
order_columns = [Rolodex.last, Rolodex.first]
elif normalized_sort_by == "city":
# Sort by city, then state abbreviation
order_columns = [Rolodex.city, Rolodex.abrev]
elif normalized_sort_by == "email":
order_columns = [Rolodex.email]
else:
# Fallback to id to avoid arbitrary column injection
order_columns = [Rolodex.id]

# Case-insensitive ordering where applicable, preserving None ordering default
ordered = []
for col in order_columns:
# Use lower() for string-like cols; SQLAlchemy will handle non-string safely enough for SQLite/Postgres
expr = func.lower(col) if col.type.python_type in (str,) else col  # type: ignore[attr-defined]
ordered.append(desc(expr) if is_desc else asc(expr))

if ordered:
base_query = base_query.order_by(*ordered)

customers = base_query.options(joinedload(Rolodex.phone_numbers)).offset(skip).limit(limit).all()
if include_total:
total = base_query.count()
return {"items": customers, "total": total}
return customers

except Exception as e:
raise HTTPException(status_code=500, detail=f"Error loading customers: {str(e)}")
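A minimal client-side sketch of the new include_total contract (the /api/customers mount point and bearer token are assumptions, not shown in this diff):

# Sketch: requesting a sorted, filtered page and reading the {items, total} shape.
import httpx

params = {
    "search": "smith, jo",            # matches the "Last, First" pattern as well as per-token name search
    "states": ["CA", "OR"],           # repeated params act as a multi-select filter
    "sort_by": "name",
    "sort_dir": "asc",
    "include_total": "true",          # switches the response from a plain list to {"items": [...], "total": N}
    "skip": 0,
    "limit": 50,
}
resp = httpx.get("http://localhost:8000/api/customers/", params=params,
                 headers={"Authorization": "Bearer <token>"})   # assumed mount point and auth scheme
page = resp.json()
print(page["total"], len(page["items"]))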
@router.get("/export")
async def export_customers(
# Optional pagination for exporting only current page; omit to export all
skip: Optional[int] = Query(None, ge=0),
limit: Optional[int] = Query(None, ge=1, le=1000000),
search: Optional[str] = Query(None),
group: Optional[str] = Query(None, description="Filter by customer group (exact match)"),
state: Optional[str] = Query(None, description="Filter by state abbreviation (exact match)"),
groups: Optional[List[str]] = Query(None, description="Filter by multiple groups (repeat param)"),
states: Optional[List[str]] = Query(None, description="Filter by multiple states (repeat param)"),
sort_by: Optional[str] = Query("id", description="Sort field: id, name, city, email"),
sort_dir: Optional[str] = Query("asc", description="Sort direction: asc or desc"),
fields: Optional[List[str]] = Query(None, description="CSV fields to include: id,name,group,city,state,phone,email"),
export_all: bool = Query(False, description="When true, ignore skip/limit and export all matches"),
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user)
):
"""Export customers as CSV respecting search, filters, and sorting.
If skip/limit provided, exports that slice; otherwise exports all matches.
"""
try:
base_query = db.query(Rolodex)

if search:
s = (search or "").strip()
s_lower = s.lower()
tokens = [t for t in s_lower.split() if t]
contains_any = or_(
func.lower(Rolodex.id).contains(s_lower),
func.lower(Rolodex.last).contains(s_lower),
func.lower(Rolodex.first).contains(s_lower),
func.lower(Rolodex.middle).contains(s_lower),
func.lower(Rolodex.city).contains(s_lower),
func.lower(Rolodex.email).contains(s_lower),
)
name_tokens = [
or_(
func.lower(Rolodex.first).contains(tok),
func.lower(Rolodex.middle).contains(tok),
func.lower(Rolodex.last).contains(tok),
)
for tok in tokens
]
combined = contains_any if not name_tokens else or_(contains_any, and_(*name_tokens))
last_first_filter = None
if "," in s_lower:
last_part, first_part = [p.strip() for p in s_lower.split(",", 1)]
if last_part and first_part:
last_first_filter = and_(
func.lower(Rolodex.last).contains(last_part),
func.lower(Rolodex.first).contains(first_part),
)
elif last_part:
last_first_filter = func.lower(Rolodex.last).contains(last_part)
final_filter = or_(combined, last_first_filter) if last_first_filter is not None else combined
base_query = base_query.filter(final_filter)

effective_groups = [g for g in (groups or []) if g] or ([group] if group else [])
if effective_groups:
base_query = base_query.filter(Rolodex.group.in_(effective_groups))
effective_states = [s for s in (states or []) if s] or ([state] if state else [])
if effective_states:
base_query = base_query.filter(Rolodex.abrev.in_(effective_states))

normalized_sort_by = (sort_by or "id").lower()
normalized_sort_dir = (sort_dir or "asc").lower()
is_desc = normalized_sort_dir == "desc"

order_columns = []
if normalized_sort_by == "id":
order_columns = [Rolodex.id]
elif normalized_sort_by == "name":
order_columns = [Rolodex.last, Rolodex.first]
elif normalized_sort_by == "city":
order_columns = [Rolodex.city, Rolodex.abrev]
elif normalized_sort_by == "email":
order_columns = [Rolodex.email]
else:
order_columns = [Rolodex.id]

ordered = []
for col in order_columns:
try:
expr = func.lower(col) if col.type.python_type in (str,) else col  # type: ignore[attr-defined]
except Exception:
expr = col
ordered.append(desc(expr) if is_desc else asc(expr))
if ordered:
base_query = base_query.order_by(*ordered)

if not export_all:
if skip is not None:
base_query = base_query.offset(skip)
if limit is not None:
base_query = base_query.limit(limit)

customers = base_query.options(joinedload(Rolodex.phone_numbers)).all()

# Prepare CSV
output = io.StringIO()
writer = csv.writer(output)
allowed_fields_in_order = ["id", "name", "group", "city", "state", "phone", "email"]
header_names = {
"id": "Customer ID",
"name": "Name",
"group": "Group",
"city": "City",
"state": "State",
"phone": "Primary Phone",
"email": "Email",
}
requested = [f.lower() for f in (fields or []) if isinstance(f, str)]
selected_fields = [f for f in allowed_fields_in_order if f in requested] if requested else allowed_fields_in_order
if not selected_fields:
selected_fields = allowed_fields_in_order
writer.writerow([header_names[f] for f in selected_fields])
for c in customers:
full_name = f"{(c.first or '').strip()} {(c.last or '').strip()}".strip()
primary_phone = ""
try:
if c.phone_numbers:
primary_phone = c.phone_numbers[0].phone or ""
except Exception:
primary_phone = ""
row_map = {
"id": c.id,
"name": full_name,
"group": c.group or "",
"city": c.city or "",
"state": c.abrev or "",
"phone": primary_phone,
"email": c.email or "",
}
writer.writerow([row_map[f] for f in selected_fields])

output.seek(0)
filename = "customers_export.csv"
return StreamingResponse(
output,
media_type="text/csv",
headers={
"Content-Disposition": f"attachment; filename={filename}"
},
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error exporting customers: {str(e)}")
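A similar sketch for consuming the CSV export; the fields parameter controls which whitelisted columns appear, and the header row uses the display names above (path and auth again assumed):

# Sketch: download the export and parse it with csv.DictReader.
import csv
import io
import httpx

resp = httpx.get("http://localhost:8000/api/customers/export",          # assumed mount point
                 params={"fields": ["id", "name", "email"], "export_all": "true"},
                 headers={"Authorization": "Bearer <token>"})
for row in csv.DictReader(io.StringIO(resp.text)):
    print(row["Customer ID"], row["Name"], row["Email"])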
@router.get("/{customer_id}", response_model=CustomerResponse)
async def get_customer(
customer_id: str,
@@ -118,10 +118,18 @@ async def create_qdro(
current_user: User = Depends(get_current_user)
):
"""Create new QDRO"""
qdro = QDRO(**qdro_data.model_dump())
# Only accept fields that exist on the model and exclude None values
allowed_fields = {c.name for c in QDRO.__table__.columns}
payload = {
k: v
for k, v in qdro_data.model_dump(exclude_unset=True).items()
if v is not None and k in allowed_fields
}
qdro = QDRO(**payload)

if not qdro.created_date:
qdro.created_date = date.today()
# Backfill created_date if model supports it; otherwise rely on created_at
if hasattr(qdro, "created_date") and not getattr(qdro, "created_date"):
setattr(qdro, "created_date", date.today())

db.add(qdro)
db.commit()
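The create_qdro change narrows the incoming payload to real model columns before constructing the ORM object; the same idea as a reusable helper (a sketch, not part of this commit):

# Sketch: keep only keys that correspond to actual SQLAlchemy columns, dropping None values.
from typing import Any, Dict

def filter_to_model_columns(model, data: Dict[str, Any]) -> Dict[str, Any]:
    allowed = {c.name for c in model.__table__.columns}
    return {k: v for k, v in data.items() if v is not None and k in allowed}

# Usage mirroring the handler above:
#   payload = filter_to_model_columns(QDRO, qdro_data.model_dump(exclude_unset=True))
#   qdro = QDRO(**payload)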
@@ -172,9 +180,11 @@ async def update_qdro(
detail="QDRO not found"
)

# Update fields
# Update fields present on the model only
allowed_fields = {c.name for c in QDRO.__table__.columns}
for field, value in qdro_data.model_dump(exclude_unset=True).items():
setattr(qdro, field, value)
if field in allowed_fields:
setattr(qdro, field, value)

db.commit()
db.refresh(qdro)
@@ -525,23 +535,33 @@ async def generate_document(
document_id = str(uuid.uuid4())
file_name = f"{template.form_name}_{file_obj.file_no}_{date.today().isoformat()}"

exports_dir = "/app/exports"
try:
os.makedirs(exports_dir, exist_ok=True)
except Exception:
try:
os.makedirs("exports", exist_ok=True)
exports_dir = "exports"
except Exception:
exports_dir = "."

if request.output_format.upper() == "PDF":
file_path = f"/app/exports/{document_id}.pdf"
file_path = f"{exports_dir}/{document_id}.pdf"
file_name += ".pdf"
# Here you would implement PDF generation
# For now, create a simple text file
with open(f"/app/exports/{document_id}.txt", "w") as f:
with open(f"{exports_dir}/{document_id}.txt", "w") as f:
f.write(merged_content)
file_path = f"/app/exports/{document_id}.txt"
file_path = f"{exports_dir}/{document_id}.txt"
elif request.output_format.upper() == "DOCX":
file_path = f"/app/exports/{document_id}.docx"
file_path = f"{exports_dir}/{document_id}.docx"
file_name += ".docx"
# Implement DOCX generation
with open(f"/app/exports/{document_id}.txt", "w") as f:
with open(f"{exports_dir}/{document_id}.txt", "w") as f:
f.write(merged_content)
file_path = f"/app/exports/{document_id}.txt"
file_path = f"{exports_dir}/{document_id}.txt"
else: # HTML
file_path = f"/app/exports/{document_id}.html"
file_path = f"{exports_dir}/{document_id}.html"
file_name += ".html"
html_content = f"<html><body><pre>{merged_content}</pre></body></html>"
with open(file_path, "w") as f:
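The directory fallback chain above could equally be read as a small helper; a sketch of the same behavior (container path first, then a relative directory, then the working directory):

# Sketch: return the first exports directory that can be created, mirroring the fallback in the diff.
import os

def resolve_exports_dir(candidates=("/app/exports", "exports")) -> str:
    for path in candidates:
        try:
            os.makedirs(path, exist_ok=True)
            return path
        except Exception:
            continue
    return "."  # last resort: write next to the process's working directory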
@@ -768,6 +788,9 @@ async def upload_document(

max_size = 10 * 1024 * 1024 # 10MB
content = await file.read()
# Treat zero-byte payloads as no file uploaded to provide a clearer client error
if len(content) == 0:
raise HTTPException(status_code=400, detail="No file uploaded")
if len(content) > max_size:
raise HTTPException(status_code=400, detail="File too large")
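A rough test sketch for the new zero-byte guard (the route path, app import, and auth handling are assumptions; dependency overrides for get_current_user are omitted):

# Sketch: an empty upload should now fail fast with 400 rather than being stored.
from fastapi.testclient import TestClient
from app.main import app  # assumed application entry point

client = TestClient(app)

def test_empty_upload_rejected():
    files = {"file": ("empty.txt", b"", "text/plain")}
    resp = client.post("/api/documents/upload", files=files)  # assumed route; auth override omitted
    assert resp.status_code == 400
    assert resp.json()["detail"] == "No file uploaded"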
@@ -294,33 +294,82 @@ async def _update_file_balances(file_obj: File, db: Session):
async def get_recent_time_entries(
days: int = Query(7, ge=1, le=30),
employee: Optional[str] = Query(None),
skip: int = Query(0, ge=0),
status: Optional[str] = Query(None, description="billed|unbilled"),
q: Optional[str] = Query(None, description="text search across description, file, employee, matter, client name"),
page: int = Query(1, ge=1),
limit: int = Query(50, ge=1, le=200),
sort_by: str = Query("date"),
sort_dir: str = Query("desc"),
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user)
):
"""Get recent time entries across all files"""
"""Get recent time entries across all files with server-side sorting and pagination"""
cutoff_date = date.today() - timedelta(days=days)

query = db.query(Ledger)\
.options(joinedload(Ledger.file).joinedload(File.owner))\

# Base query with joins for sorting/searching by client/matter
base_query = db.query(Ledger) \
.join(File, Ledger.file_no == File.file_no) \
.outerjoin(Rolodex, File.id == Rolodex.id) \
.options(joinedload(Ledger.file).joinedload(File.owner)) \
.filter(and_(
Ledger.date >= cutoff_date,
Ledger.t_type == "2" # Time entries
))\
.order_by(desc(Ledger.date))

Ledger.t_type == "2"
))

if employee:
query = query.filter(Ledger.empl_num == employee)

entries = query.offset(skip).limit(limit).all()

base_query = base_query.filter(Ledger.empl_num == employee)

# Status/billed filtering
if status:
status_l = str(status).strip().lower()
if status_l in ("billed", "unbilled"):
billed_value = "Y" if status_l == "billed" else "N"
base_query = base_query.filter(Ledger.billed == billed_value)

# Text search across multiple fields
if q:
query_text = f"%{q.strip()}%"
base_query = base_query.filter(
or_(
Ledger.note.ilike(query_text),
Ledger.file_no.ilike(query_text),
Ledger.empl_num.ilike(query_text),
File.regarding.ilike(query_text),
Rolodex.first.ilike(query_text),
Rolodex.last.ilike(query_text)
)
)

# Sorting mapping (supported columns)
sort_map = {
"date": Ledger.date,
"file_no": Ledger.file_no,
"client_name": Rolodex.last, # best-effort: sort by client last name
"empl_num": Ledger.empl_num,
"quantity": Ledger.quantity,
"hours": Ledger.quantity, # alias
"rate": Ledger.rate,
"amount": Ledger.amount,
"billed": Ledger.billed,
"description": Ledger.note,
}
sort_column = sort_map.get(sort_by.lower(), Ledger.date)
direction = desc if str(sort_dir).lower() == "desc" else asc

# Total count for pagination (distinct on Ledger.id to avoid join-induced dupes)
total_count = base_query.with_entities(func.count(func.distinct(Ledger.id))).scalar()

# Apply sorting and pagination
offset = (page - 1) * limit
page_query = base_query.order_by(direction(sort_column)).offset(offset).limit(limit)
entries = page_query.all()

# Format results with file and client information
results = []
for entry in entries:
file_obj = entry.file
client = file_obj.owner if file_obj else None

results.append({
"id": entry.id,
"date": entry.date.isoformat(),
@@ -334,8 +383,15 @@ async def get_recent_time_entries(
"description": entry.note,
"billed": entry.billed == "Y"
})

return {"entries": results, "total_entries": len(results)}

return {
"entries": results,
"total_count": total_count,
"page": page,
"limit": limit,
"sort_by": sort_by,
"sort_dir": sort_dir,
}


@router.post("/time-entry/quick")
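With the endpoint now returning total_count alongside page and limit, a caller can walk every page; a sketch of that loop (base URL and route path are assumptions):

# Sketch: page through recent time entries until total_count is exhausted.
import httpx

def fetch_all_entries(base="http://localhost:8000/api", limit=50):
    page, collected = 1, []
    while True:
        resp = httpx.get(f"{base}/time-entries/recent",       # assumed route path
                         params={"page": page, "limit": limit,
                                 "sort_by": "date", "sort_dir": "desc"})
        body = resp.json()
        collected.extend(body["entries"])
        if page * limit >= body["total_count"]:
            return collected
        page += 1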
app/api/flexible.py (new file, 281 lines)
@@ -0,0 +1,281 @@
"""
Flexible Imports admin API: list, filter, and export unmapped rows captured during CSV imports.
"""
from typing import Optional, Dict, Any, List
from datetime import datetime
import csv
import io

from fastapi import APIRouter, Depends, Query, HTTPException
from fastapi.responses import StreamingResponse
from sqlalchemy.orm import Session
from sqlalchemy import func, or_, cast, String

from app.database.base import get_db
from app.auth.security import get_admin_user
from app.models.flexible import FlexibleImport


router = APIRouter(prefix="/flexible", tags=["flexible"])


@router.get("/imports")
async def list_flexible_imports(
file_type: Optional[str] = Query(None, description="Filter by CSV file type (e.g., FILES.csv)"),
target_table: Optional[str] = Query(None, description="Filter by target model table name"),
q: Optional[str] = Query(None, description="Quick text search across file type, target table, and unmapped data"),
has_keys: Optional[List[str]] = Query(
None,
description="Filter rows where extra_data (or its 'unmapped' payload) contains these keys. Repeat param for multiple keys.",
),
skip: int = Query(0, ge=0),
limit: int = Query(50, ge=1, le=500),
db: Session = Depends(get_db),
current_user=Depends(get_admin_user),
):
"""List flexible import rows with optional filtering, quick search, and pagination."""
query = db.query(FlexibleImport)
if file_type:
query = query.filter(FlexibleImport.file_type == file_type)
if target_table:
query = query.filter(FlexibleImport.target_table == target_table)
if q:
pattern = f"%{q.strip()}%"
# Search across file_type, target_table, and serialized JSON extra_data
query = query.filter(
or_(
FlexibleImport.file_type.ilike(pattern),
FlexibleImport.target_table.ilike(pattern),
cast(FlexibleImport.extra_data, String).ilike(pattern),
)
)

# Filter by key presence inside JSON payload by string matching of the serialized JSON
# This is DB-agnostic and works across SQLite/Postgres, though not as precise as JSON operators.
if has_keys:
for k in [k for k in has_keys if k is not None and str(k).strip() != ""]:
key = str(k).strip()
# Look for the JSON key token followed by a colon, e.g. "key":
query = query.filter(cast(FlexibleImport.extra_data, String).ilike(f'%"{key}":%'))

total = query.count()
items = (
query.order_by(FlexibleImport.id.desc())
.offset(skip)
.limit(limit)
.all()
)

def serialize(item: FlexibleImport) -> Dict[str, Any]:
return {
"id": item.id,
"file_type": item.file_type,
"target_table": item.target_table,
"primary_key_field": item.primary_key_field,
"primary_key_value": item.primary_key_value,
"extra_data": item.extra_data,
}

return {
"total": total,
"skip": skip,
"limit": limit,
"items": [serialize(i) for i in items],
}
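The has_keys filter relies on string-matching the serialized JSON rather than database JSON operators; a tiny standalone illustration of what that pattern matches, and the known imprecision:

# Sketch: ilike('%"KEY":%') returns rows whose serialized extra_data contains the quoted key.
import json

row = {"unmapped": {"OLD_CODE": "X1", "LEGACY_FLAG": None}}
serialized = json.dumps(row)

print('"OLD_CODE":' in serialized)    # True  -> this row would match has_keys=OLD_CODE
print('"MISSING":' in serialized)     # False -> filtered out
# Caveat: a *value* containing the literal text '"OLD_CODE":' would also match,
# which is the imprecision the comment in the endpoint acknowledges.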
@router.get("/options")
async def flexible_options(
db: Session = Depends(get_db),
current_user=Depends(get_admin_user),
):
"""Return distinct file types and target tables for filter dropdowns."""
file_types: List[str] = [
ft for (ft,) in db.query(func.distinct(FlexibleImport.file_type)).order_by(FlexibleImport.file_type.asc()).all()
if ft is not None
]
target_tables: List[str] = [
tt for (tt,) in db.query(func.distinct(FlexibleImport.target_table)).order_by(FlexibleImport.target_table.asc()).all()
if tt is not None and tt != ""
]
return {"file_types": file_types, "target_tables": target_tables}


@router.get("/export")
async def export_unmapped_csv(
file_type: Optional[str] = Query(None, description="Filter by CSV file type (e.g., FILES.csv)"),
target_table: Optional[str] = Query(None, description="Filter by target model table name"),
has_keys: Optional[List[str]] = Query(
None,
description="Filter rows where extra_data (or its 'unmapped' payload) contains these keys. Repeat param for multiple keys.",
),
db: Session = Depends(get_db),
current_user=Depends(get_admin_user),
):
"""Export unmapped rows as CSV for review. Includes basic metadata columns and unmapped fields.

If FlexibleImport.extra_data contains a nested 'unmapped' dict, those keys are exported.
Otherwise, all keys of extra_data are exported.
"""
query = db.query(FlexibleImport)
if file_type:
query = query.filter(FlexibleImport.file_type == file_type)
if target_table:
query = query.filter(FlexibleImport.target_table == target_table)
if has_keys:
for k in [k for k in has_keys if k is not None and str(k).strip() != ""]:
key = str(k).strip()
query = query.filter(cast(FlexibleImport.extra_data, String).ilike(f'%"{key}":%'))

rows: List[FlexibleImport] = query.order_by(FlexibleImport.id.asc()).all()
if not rows:
raise HTTPException(status_code=404, detail="No matching flexible imports to export")

# Determine union of unmapped keys across all rows
unmapped_keys: List[str] = []
key_set = set()
for r in rows:
data = r.extra_data or {}
payload = data.get("unmapped") if isinstance(data, dict) and isinstance(data.get("unmapped"), dict) else data
if isinstance(payload, dict):
for k in payload.keys():
if k not in key_set:
key_set.add(k)
unmapped_keys.append(k)

# Prepare CSV
meta_headers = [
"id",
"file_type",
"target_table",
"primary_key_field",
"primary_key_value",
]
fieldnames = meta_headers + unmapped_keys

output = io.StringIO()
writer = csv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()

for r in rows:
row_out: Dict[str, Any] = {
"id": r.id,
"file_type": r.file_type,
"target_table": r.target_table or "",
"primary_key_field": r.primary_key_field or "",
"primary_key_value": r.primary_key_value or "",
}
data = r.extra_data or {}
payload = data.get("unmapped") if isinstance(data, dict) and isinstance(data.get("unmapped"), dict) else data
if isinstance(payload, dict):
for k in unmapped_keys:
v = payload.get(k)
# Normalize lists/dicts to JSON strings for CSV safety
if isinstance(v, (dict, list)):
try:
import json as _json
row_out[k] = _json.dumps(v, ensure_ascii=False)
except Exception:
row_out[k] = str(v)
else:
row_out[k] = v if v is not None else ""
writer.writerow(row_out)

output.seek(0)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename_parts = ["flexible_unmapped"]
if file_type:
filename_parts.append(file_type.replace("/", "-").replace(" ", "_"))
if target_table:
filename_parts.append(target_table.replace("/", "-").replace(" ", "_"))
filename = "_".join(filename_parts) + f"_{timestamp}.csv"

return StreamingResponse(
iter([output.getvalue()]),
media_type="text/csv",
headers={
"Content-Disposition": f"attachment; filename=\"{filename}\"",
},
)
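The export header is the union of unmapped keys across all matching rows, in first-seen order; the same logic in isolation with plain dicts:

# Sketch: first-seen ordered union of keys, as used to build the CSV fieldnames.
rows = [
    {"unmapped": {"a": 1, "b": 2}},
    {"unmapped": {"b": 3, "c": 4}},
]

unmapped_keys, seen = [], set()
for r in rows:
    payload = r.get("unmapped") if isinstance(r.get("unmapped"), dict) else r
    for k in payload:
        if k not in seen:
            seen.add(k)
            unmapped_keys.append(k)

print(unmapped_keys)  # ['a', 'b', 'c']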
@router.get("/export/{row_id}")
async def export_single_row_csv(
row_id: int,
db: Session = Depends(get_db),
current_user=Depends(get_admin_user),
):
"""Export a single flexible import row as CSV.

Includes metadata columns plus keys from the row's unmapped payload.
If FlexibleImport.extra_data contains a nested 'unmapped' dict, those keys are exported;
otherwise, all keys of extra_data are exported.
"""
row: Optional[FlexibleImport] = (
db.query(FlexibleImport).filter(FlexibleImport.id == row_id).first()
)
if not row:
raise HTTPException(status_code=404, detail="Flexible import row not found")

data = row.extra_data or {}
payload = (
data.get("unmapped")
if isinstance(data, dict) and isinstance(data.get("unmapped"), dict)
else data
)

unmapped_keys: List[str] = []
if isinstance(payload, dict):
for k in payload.keys():
unmapped_keys.append(k)

meta_headers = [
"id",
"file_type",
"target_table",
"primary_key_field",
"primary_key_value",
]
fieldnames = meta_headers + unmapped_keys

output = io.StringIO()
writer = csv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()

row_out: Dict[str, Any] = {
"id": row.id,
"file_type": row.file_type,
"target_table": row.target_table or "",
"primary_key_field": row.primary_key_field or "",
"primary_key_value": row.primary_key_value or "",
}
if isinstance(payload, dict):
for k in unmapped_keys:
v = payload.get(k)
if isinstance(v, (dict, list)):
try:
import json as _json
row_out[k] = _json.dumps(v, ensure_ascii=False)
except Exception:
row_out[k] = str(v)
else:
row_out[k] = v if v is not None else ""

writer.writerow(row_out)
output.seek(0)

timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = (
f"flexible_row_{row.id}_{row.file_type.replace('/', '-').replace(' ', '_')}_{timestamp}.csv"
if row.file_type
else f"flexible_row_{row.id}_{timestamp}.csv"
)

return StreamingResponse(
iter([output.getvalue()]),
media_type="text/csv",
headers={
"Content-Disposition": f"attachment; filename=\"{filename}\"",
},
)
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
"""
Advanced Search API endpoints - Comprehensive search across all data types
"""
from typing import List, Optional, Union, Dict, Any
from typing import List, Optional, Union, Dict, Any, Tuple
from fastapi import APIRouter, Depends, HTTPException, status, Query, Body
from sqlalchemy.orm import Session, joinedload
from sqlalchemy import or_, and_, func, desc, asc, text, case, cast, String, DateTime, Date, Numeric
@@ -11,6 +11,14 @@ import json
import re

from app.database.base import get_db
from app.api.search_highlight import (
build_query_tokens,
highlight_text,
create_customer_highlight,
create_file_highlight,
create_ledger_highlight,
create_qdro_highlight,
)
from app.models.rolodex import Rolodex, Phone
from app.models.files import File
from app.models.ledger import Ledger
@@ -1059,60 +1067,16 @@ def _calculate_document_relevance(doc: FormIndex, query: str) -> float:

# Highlight functions
def _create_customer_highlight(customer: Rolodex, query: str) -> str:
"""Create highlight snippet for customer"""
if not query:
return ""

full_name = f"{customer.first or ''} {customer.last}".strip()
if query.lower() in full_name.lower():
return f"Name: {full_name}"

if customer.email and query.lower() in customer.email.lower():
return f"Email: {customer.email}"

if customer.city and query.lower() in customer.city.lower():
return f"City: {customer.city}"

return ""
return create_customer_highlight(customer, query)


def _create_file_highlight(file_obj: File, query: str) -> str:
"""Create highlight snippet for file"""
if not query:
return ""

if file_obj.regarding and query.lower() in file_obj.regarding.lower():
return f"Matter: {file_obj.regarding}"

if file_obj.file_type and query.lower() in file_obj.file_type.lower():
return f"Type: {file_obj.file_type}"

return ""
return create_file_highlight(file_obj, query)


def _create_ledger_highlight(ledger: Ledger, query: str) -> str:
"""Create highlight snippet for ledger"""
if not query:
return ""

if ledger.note and query.lower() in ledger.note.lower():
return f"Note: {ledger.note[:100]}..."

return ""
return create_ledger_highlight(ledger, query)


def _create_qdro_highlight(qdro: QDRO, query: str) -> str:
"""Create highlight snippet for QDRO"""
if not query:
return ""

if qdro.form_name and query.lower() in qdro.form_name.lower():
return f"Form: {qdro.form_name}"

if qdro.pet and query.lower() in qdro.pet.lower():
return f"Petitioner: {qdro.pet}"

if qdro.case_number and query.lower() in qdro.case_number.lower():
return f"Case: {qdro.case_number}"

return ""
return create_qdro_highlight(qdro, query)
app/api/search_highlight.py (new file, 141 lines)
@@ -0,0 +1,141 @@
"""
Server-side highlight utilities for search results.

These functions generate HTML snippets with <strong> around matched tokens,
preserving the original casing of the source text. The output is intended to be
sanitized on the client before insertion into the DOM.
"""
from typing import List, Tuple, Any
import re


def build_query_tokens(query: str) -> List[str]:
"""Split query into alphanumeric tokens, trimming punctuation and deduping while preserving order."""
if not query:
return []
raw_parts = re.sub(r"[,_;:]+", " ", str(query or "").strip()).split()
cleaned: List[str] = []
seen = set()
for part in raw_parts:
token = re.sub(r"^[^A-Za-z0-9]+|[^A-Za-z0-9]+$", "", part)
lowered = token.lower()
if token and lowered not in seen:
cleaned.append(token)
seen.add(lowered)
return cleaned


def _merge_ranges(ranges: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
if not ranges:
return []
ranges.sort(key=lambda x: (x[0], x[1]))
merged: List[Tuple[int, int]] = []
cur_s, cur_e = ranges[0]
for s, e in ranges[1:]:
if s <= cur_e:
cur_e = max(cur_e, e)
else:
merged.append((cur_s, cur_e))
cur_s, cur_e = s, e
merged.append((cur_s, cur_e))
return merged


def highlight_text(value: str, tokens: List[str]) -> str:
"""Return `value` with case-insensitive matches of `tokens` wrapped in <strong>, preserving original casing."""
if value is None:
return ""
source = str(value)
if not source or not tokens:
return source
haystack = source.lower()
ranges: List[Tuple[int, int]] = []
for t in tokens:
needle = str(t or "").lower()
if not needle:
continue
start = 0
last_possible = max(0, len(haystack) - len(needle))
while start <= last_possible and len(needle) > 0:
idx = haystack.find(needle, start)
if idx == -1:
break
ranges.append((idx, idx + len(needle)))
start = idx + 1
if not ranges:
return source
parts: List[str] = []
merged = _merge_ranges(ranges)
pos = 0
for s, e in merged:
if pos < s:
parts.append(source[pos:s])
parts.append("<strong>" + source[s:e] + "</strong>")
pos = e
if pos < len(source):
parts.append(source[pos:])
return "".join(parts)
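A quick usage example of the two helpers above; the expected output was worked through by hand against the implementation:

# Sketch: tokenize the query, then highlight matches while keeping the source casing.
from app.api.search_highlight import build_query_tokens, highlight_text

tokens = build_query_tokens("smith, spring")            # -> ['smith', 'spring']
print(highlight_text("John Smith, Springfield", tokens))
# -> John <strong>Smith</strong>, <strong>Spring</strong>field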
def create_customer_highlight(customer: Any, query: str) -> str:
if not query:
return ""
tokens = build_query_tokens(query)
full_name = f"{getattr(customer, 'first', '') or ''} {getattr(customer, 'last', '')}".strip()
email = getattr(customer, 'email', None)
city = getattr(customer, 'city', None)
ql = query.lower()

if full_name and ql in full_name.lower():
return f"Name: {highlight_text(full_name, tokens)}"
if email and ql in str(email).lower():
return f"Email: {highlight_text(str(email), tokens)}"
if city and ql in str(city).lower():
return f"City: {highlight_text(str(city), tokens)}"
return ""


def create_file_highlight(file_obj: Any, query: str) -> str:
if not query:
return ""
tokens = build_query_tokens(query)
regarding = getattr(file_obj, 'regarding', None)
file_type = getattr(file_obj, 'file_type', None)
ql = query.lower()
if regarding and ql in str(regarding).lower():
return f"Matter: {highlight_text(str(regarding), tokens)}"
if file_type and ql in str(file_type).lower():
return f"Type: {highlight_text(str(file_type), tokens)}"
return ""


def create_ledger_highlight(ledger: Any, query: str) -> str:
if not query:
return ""
tokens = build_query_tokens(query)
note = getattr(ledger, 'note', None)
if note and query.lower() in str(note).lower():
text = str(note) or ""
preview = text[:160]
suffix = "..." if len(text) > 160 else ""
return f"Note: {highlight_text(preview, tokens)}{suffix}"
return ""


def create_qdro_highlight(qdro: Any, query: str) -> str:
if not query:
return ""
tokens = build_query_tokens(query)
form_name = getattr(qdro, 'form_name', None)
pet = getattr(qdro, 'pet', None)
case_number = getattr(qdro, 'case_number', None)
ql = query.lower()
if form_name and ql in str(form_name).lower():
return f"Form: {highlight_text(str(form_name), tokens)}"
if pet and ql in str(pet).lower():
return f"Petitioner: {highlight_text(str(pet), tokens)}"
if case_number and ql in str(case_number).lower():
return f"Case: {highlight_text(str(case_number), tokens)}"
return ""
@@ -9,8 +9,9 @@ from datetime import datetime
import secrets

from app.database.base import get_db
from app.models import User, SupportTicket, TicketResponse, TicketStatus, TicketPriority, TicketCategory
from app.models import User, SupportTicket, TicketResponse as TicketResponseModel, TicketStatus, TicketPriority, TicketCategory
from app.auth.security import get_current_user, get_admin_user
from app.services.audit import audit_service

router = APIRouter()
@@ -46,7 +47,7 @@ class ResponseCreate(BaseModel):
is_internal: bool = False


class TicketResponse(BaseModel):
class TicketResponseOut(BaseModel):
"""Ticket response model"""
id: int
ticket_id: int
@@ -80,7 +81,7 @@ class TicketDetail(BaseModel):
assigned_to: Optional[int]
assigned_admin_name: Optional[str]
submitter_name: Optional[str]
responses: List[TicketResponse] = []
responses: List[TicketResponseOut] = []

class Config:
from_attributes = True
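The rename exists because the Pydantic schema previously shadowed the SQLAlchemy model of the same name imported from app.models; the resulting pattern, reduced to its essentials:

# Sketch: ORM model aliased on import, API schema given a distinct name so both can coexist.
from app.models import TicketResponse as TicketResponseModel  # SQLAlchemy model
from pydantic import BaseModel

class TicketResponseOut(BaseModel):
    """API-facing shape; no longer collides with the ORM class."""
    id: int
    ticket_id: int

    class Config:
        from_attributes = True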
@@ -135,6 +136,20 @@ async def create_support_ticket(
db.commit()
db.refresh(new_ticket)

# Audit logging (non-blocking)
try:
audit_service.log_action(
db=db,
action="CREATE",
resource_type="SUPPORT_TICKET",
user=current_user,
resource_id=str(new_ticket.id),
details={"ticket_number": new_ticket.ticket_number},
request=request,
)
except Exception:
pass

return {
"message": "Support ticket created successfully",
"ticket_number": new_ticket.ticket_number,
@@ -225,7 +240,7 @@ async def get_ticket(
ticket = db.query(SupportTicket).options(
joinedload(SupportTicket.submitter),
joinedload(SupportTicket.assigned_admin),
joinedload(SupportTicket.responses).joinedload(TicketResponse.author)
joinedload(SupportTicket.responses).joinedload(TicketResponseModel.author)
).filter(SupportTicket.id == ticket_id).first()

if not ticket:
@@ -303,8 +318,19 @@ async def update_ticket(
ticket.updated_at = datetime.utcnow()
db.commit()

# Log the update (audit logging can be added later)
# TODO: Add audit logging for ticket updates
# Audit logging (non-blocking)
try:
audit_service.log_action(
db=db,
action="UPDATE",
resource_type="SUPPORT_TICKET",
user=current_user,
resource_id=str(ticket_id),
details={"changes": changes} if changes else None,
request=request,
)
except Exception:
pass

return {"message": "Ticket updated successfully"}
@@ -327,7 +353,7 @@ async def add_response(
)

# Create response
response = TicketResponse(
response = TicketResponseModel(
ticket_id=ticket_id,
message=response_data.message,
is_internal=response_data.is_internal,
@@ -343,8 +369,19 @@ async def add_response(
db.commit()
db.refresh(response)

# Log the response (audit logging can be added later)
# TODO: Add audit logging for ticket responses
# Audit logging (non-blocking)
try:
audit_service.log_action(
db=db,
action="ADD_RESPONSE",
resource_type="SUPPORT_TICKET",
user=current_user,
resource_id=str(ticket_id),
details={"response_id": response.id, "is_internal": response_data.is_internal},
request=request,
)
except Exception:
pass

return {"message": "Response added successfully", "response_id": response.id}
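The same try/except-pass guard around audit_service.log_action now appears in three handlers; if it keeps spreading, a tiny wrapper would capture the intent (a sketch only, not part of this commit):

# Sketch: best-effort audit logging that never interrupts the request path.
from app.services.audit import audit_service

def audit_quietly(**kwargs) -> None:
    try:
        audit_service.log_action(**kwargs)
    except Exception:
        # Auditing is deliberately non-blocking; failures are swallowed.
        pass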