fixing rolodex and search

HotSwapp
2025-08-11 21:58:25 -05:00
parent 278eb7c5d4
commit c76b68d009
25 changed files with 1651 additions and 915 deletions

View File

@@ -96,7 +96,7 @@ async def search_by_phone(
"""Search customers by phone number (legacy phone search feature)"""
phones = db.query(Phone).join(Rolodex).filter(
Phone.phone.contains(phone)
).options(joinedload(Phone.rolodex)).all()
).options(joinedload(Phone.rolodex_entry)).all()
results = []
for phone_record in phones:
@@ -104,10 +104,10 @@ async def search_by_phone(
"phone": phone_record.phone,
"location": phone_record.location,
"customer": {
"id": phone_record.rolodex.id,
"name": f"{phone_record.rolodex.first or ''} {phone_record.rolodex.last}".strip(),
"city": phone_record.rolodex.city,
"state": phone_record.rolodex.abrev
"id": phone_record.rolodex_entry.id,
"name": f"{phone_record.rolodex_entry.first or ''} {phone_record.rolodex_entry.last}".strip(),
"city": phone_record.rolodex_entry.city,
"state": phone_record.rolodex_entry.abrev
}
})
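
Review note: the `rolodex` → `rolodex_entry` rename only works if it matches the attribute actually declared on the `Phone` model. A minimal sketch of the relationship this diff implies; the table names, columns, and `back_populates` wiring are assumptions based on the identifiers in the hunks, not the repo's real model definitions:

```python
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class Rolodex(Base):
    __tablename__ = "rolodex"
    id = Column(Integer, primary_key=True)
    first = Column(String)
    last = Column(String)
    city = Column(String)
    abrev = Column(String)
    # Matches joinedload(Rolodex.phone_numbers) in the next hunk.
    phone_numbers = relationship("Phone", back_populates="rolodex_entry")

class Phone(Base):
    __tablename__ = "phone"
    id = Column(Integer, primary_key=True)
    phone = Column(String)
    location = Column(String)
    rolodex_id = Column(Integer, ForeignKey("rolodex.id"))
    # The attribute the search code now loads eagerly.
    rolodex_entry = relationship("Rolodex", back_populates="phone_numbers")
```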
@@ -178,21 +178,25 @@ async def list_customers(
     current_user: User = Depends(get_current_user)
 ):
     """List customers with pagination and search"""
-    query = db.query(Rolodex).options(joinedload(Rolodex.phone_numbers))
-    if search:
-        query = query.filter(
-            or_(
-                Rolodex.id.contains(search),
-                Rolodex.last.contains(search),
-                Rolodex.first.contains(search),
-                Rolodex.city.contains(search),
-                Rolodex.email.contains(search)
-            )
-        )
-    customers = query.offset(skip).limit(limit).all()
-    return customers
+    try:
+        query = db.query(Rolodex).options(joinedload(Rolodex.phone_numbers))
+        if search:
+            query = query.filter(
+                or_(
+                    Rolodex.id.contains(search),
+                    Rolodex.last.contains(search),
+                    Rolodex.first.contains(search),
+                    Rolodex.city.contains(search),
+                    Rolodex.email.contains(search)
+                )
+            )
+        customers = query.offset(skip).limit(limit).all()
+        return customers
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Error loading customers: {str(e)}")

 @router.get("/{customer_id}", response_model=CustomerResponse)

View File

@@ -72,7 +72,7 @@ FIELD_MAPPINGS = {
"A3": "a3",
"City": "city",
"Abrev": "abrev",
# "St": "st", # Full state name - not mapped (model only has abrev)
"St": None, # Full state name - skip this field as model only has abrev
"Zip": "zip",
"Email": "email",
"DOB": "dob",
@@ -366,6 +366,7 @@ def parse_date(date_str: str) -> Optional[datetime]:
     return None
+
 def convert_value(value: str, field_name: str) -> Any:
     """Convert string value to appropriate type based on field name"""
     if not value or value.strip() == "" or value.strip().lower() in ["null", "none", "n/a"]:
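
The null-sentinel guard at the top of `convert_value` is worth an isolated illustration; `is_null_like` below is a hypothetical extraction of just that condition:

```python
def is_null_like(value: str) -> bool:
    # Mirrors the guard above: empty, whitespace-only, or a textual null.
    return not value or value.strip() == "" or value.strip().lower() in ["null", "none", "n/a"]

for raw in ["", "  ", "N/A", "null", "Austin"]:
    print(repr(raw), "->", is_null_like(raw))
# '' -> True, '  ' -> True, 'N/A' -> True, 'null' -> True, 'Austin' -> False
```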
@@ -483,8 +484,116 @@ async def import_csv_data(
     try:
         # Read CSV content
         content = await file.read()
-        csv_content = content.decode('utf-8')
-        csv_reader = csv.DictReader(io.StringIO(csv_content))
+        # Try multiple encodings for legacy CSV files
+        encodings = ['utf-8', 'windows-1252', 'iso-8859-1', 'cp1252']
+        csv_content = None
+        for encoding in encodings:
+            try:
+                csv_content = content.decode(encoding)
+                break
+            except UnicodeDecodeError:
+                continue
+
+        if csv_content is None:
+            raise HTTPException(status_code=400, detail="Could not decode CSV file. Please ensure it's saved in UTF-8, Windows-1252, or ISO-8859-1 encoding.")
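
The fallback loop works, though note that `iso-8859-1` accepts every possible byte sequence, so once it is in the list the `csv_content is None` branch is effectively unreachable (and `cp1252` after it is never tried). The same chain as a reusable helper; this is a hypothetical sketch, not part of the commit:

```python
from typing import Optional

def decode_with_fallback(content: bytes,
                         encodings=('utf-8', 'windows-1252', 'iso-8859-1', 'cp1252')) -> Optional[str]:
    # Return the first successful decode, or None if every codec fails.
    for encoding in encodings:
        try:
            return content.decode(encoding)
        except UnicodeDecodeError:
            continue
    return None

print(decode_with_fallback("café".encode("windows-1252")))  # 'café'
```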
+        # Preprocess CSV content to fix common legacy issues
+        def preprocess_csv(content):
+            lines = content.split('\n')
+            cleaned_lines = []
+            i = 0
+            while i < len(lines):
+                line = lines[i]
+                # If line doesn't have the expected number of commas, it might be a broken multi-line field
+                if i == 0:  # Header line
+                    cleaned_lines.append(line)
+                    expected_comma_count = line.count(',')
+                    i += 1
+                    continue
+
+                # Check if this line has the expected number of commas
+                if line.count(',') < expected_comma_count:
+                    # This might be a continuation of the previous line
+                    # Try to merge with previous line
+                    if cleaned_lines:
+                        cleaned_lines[-1] += " " + line.replace('\n', ' ').replace('\r', ' ')
+                    else:
+                        cleaned_lines.append(line)
+                else:
+                    cleaned_lines.append(line)
+                i += 1
+
+            return '\n'.join(cleaned_lines)
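
`preprocess_csv` folds a line with too few commas back into the row above it; note that no call site for it appears anywhere in the visible diff. A behavior check, assuming the function were lifted to module scope, with invented input:

```python
broken = "Id,Notes,City\n1,long note,Austin\nthat continued\n2,ok,Dallas"
print(preprocess_csv(broken))
# Id,Notes,City
# 1,long note,Austin that continued
# 2,ok,Dallas
```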
+        # Custom robust parser for problematic legacy CSV files
+        class MockCSVReader:
+            def __init__(self, data, fieldnames):
+                self.data = data
+                self.fieldnames = fieldnames
+                self.index = 0
+
+            def __iter__(self):
+                return self
+
+            def __next__(self):
+                if self.index >= len(self.data):
+                    raise StopIteration
+                row = self.data[self.index]
+                self.index += 1
+                return row
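
`MockCSVReader` emulates just enough of `csv.DictReader` for the import loop: a `.fieldnames` attribute plus dict-per-row iteration. A quick illustration, again assuming module scope and made-up rows:

```python
rows = [{"Id": "1", "Last": "Smith"}, {"Id": "2", "Last": "Jones"}]
reader = MockCSVReader(rows, ["Id", "Last"])
print(reader.fieldnames)  # ['Id', 'Last']
for row in reader:
    print(row)            # one dict per data row, like DictReader
```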
+        try:
+            lines = csv_content.strip().split('\n')
+            if not lines:
+                raise ValueError("Empty CSV file")
+
+            # Parse header using proper CSV parsing
+            header_reader = csv.reader(io.StringIO(lines[0]))
+            headers = next(header_reader)
+            headers = [h.strip() for h in headers]
+            print(f"DEBUG: Found {len(headers)} headers: {headers}")
+
+            # Parse data rows with proper CSV parsing
+            rows_data = []
+            skipped_rows = 0
+            for line_num, line in enumerate(lines[1:], start=2):
+                # Skip empty lines
+                if not line.strip():
+                    continue
+
+                try:
+                    # Use proper CSV parsing to handle commas within quoted fields
+                    line_reader = csv.reader(io.StringIO(line))
+                    fields = next(line_reader)
+                    fields = [f.strip() for f in fields]
+
+                    # Skip rows that are clearly malformed (too few fields)
+                    if len(fields) < len(headers) // 2:  # Less than half the expected fields
+                        skipped_rows += 1
+                        continue
+
+                    # Pad or truncate to match header length
+                    while len(fields) < len(headers):
+                        fields.append('')
+                    fields = fields[:len(headers)]
+
+                    row_dict = dict(zip(headers, fields))
+                    rows_data.append(row_dict)
+                except Exception as row_error:
+                    print(f"Skipping malformed row {line_num}: {row_error}")
+                    skipped_rows += 1
+                    continue
+
+            csv_reader = MockCSVReader(rows_data, headers)
+            print(f"SUCCESS: Parsed {len(rows_data)} rows (skipped {skipped_rows} malformed rows)")
+        except Exception as e:
+            print(f"Custom parsing failed: {e}")
+            raise HTTPException(status_code=400, detail=f"Could not parse CSV file. The file appears to have serious formatting issues. Error: {str(e)}")

         imported_count = 0
         errors = []
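
Parsing each line with its own `csv.reader` is what lets commas inside quoted fields survive, though splitting on `'\n'` first means a quoted field containing a real newline still arrives broken (which is what the preprocessing above tries to patch). A one-line check with invented data:

```python
import csv
import io

print(next(csv.reader(io.StringIO('1,"Smith, John",Austin'))))
# ['1', 'Smith, John', 'Austin'] -- the quoted comma is preserved
```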
@@ -500,7 +609,7 @@ async def import_csv_data(
                 model_data = {}
                 for csv_field, db_field in field_mapping.items():
-                    if csv_field in row:
+                    if csv_field in row and db_field is not None:  # Skip fields mapped to None
                         converted_value = convert_value(row[csv_field], csv_field)
                         if converted_value is not None:
                             model_data[db_field] = converted_value
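
The `db_field is not None` guard is the consumer side of the `"St": None` mapping change above. The rule in isolation, with names mirroring the diff and values invented:

```python
FIELD_MAPPINGS = {"City": "city", "Abrev": "abrev", "St": None}  # subset for the demo

def map_row(row: dict) -> dict:
    model_data = {}
    for csv_field, db_field in FIELD_MAPPINGS.items():
        if csv_field in row and db_field is not None:  # recognized but deliberately dropped
            model_data[db_field] = row[csv_field]
    return model_data

print(map_row({"City": "Austin", "Abrev": "TX", "St": "Texas"}))
# {'city': 'Austin', 'abrev': 'TX'} -- "St" is consumed but not stored
```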
@@ -509,6 +618,15 @@ async def import_csv_data(
                 if not any(model_data.values()):
                     continue

+                # Special validation for models with required fields
+                if model_class == Phone:
+                    if 'phone' not in model_data or not model_data['phone']:
+                        continue  # Skip phone records without a phone number
+
+                if model_class == Rolodex:
+                    if 'last' not in model_data or not model_data['last']:
+                        continue  # Skip rolodex records without a last name/company name
+
                 # Create model instance
                 instance = model_class(**model_data)
                 db.add(instance)
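
The per-model guards keep NOT NULL violations from aborting the whole import. If more models grow required fields, the same check could be table-driven; a hypothetical sketch, not something this commit adds:

```python
REQUIRED_FIELDS = {"Phone": "phone", "Rolodex": "last"}  # model name -> required column

def has_required(model_name: str, model_data: dict) -> bool:
    required = REQUIRED_FIELDS.get(model_name)
    return required is None or bool(model_data.get(required))

print(has_required("Phone", {"phone": ""}))        # False -> skip the row
print(has_required("Rolodex", {"last": "Smith"}))  # True  -> keep it
```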
@@ -542,6 +660,9 @@ async def import_csv_data(
         return result

     except Exception as e:
         print(f"IMPORT ERROR DEBUG: {type(e).__name__}: {str(e)}")
+        import traceback
+        print(f"TRACEBACK: {traceback.format_exc()}")
         db.rollback()
         raise HTTPException(status_code=500, detail=f"Import failed: {str(e)}")
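
`traceback.format_exc()` captures the full stack trace as a string, which `str(e)` alone does not; a tiny self-contained demo:

```python
import traceback

try:
    int("not-a-number")
except Exception as e:
    print(f"str(e): {e}")                          # just the message
    print(f"TRACEBACK: {traceback.format_exc()}")  # message plus full stack
```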
@@ -616,8 +737,22 @@ async def validate_csv_file(
     try:
         content = await file.read()
-        csv_content = content.decode('utf-8')
-        csv_reader = csv.DictReader(io.StringIO(csv_content))
+        # Try multiple encodings for legacy CSV files
+        encodings = ['utf-8', 'windows-1252', 'iso-8859-1', 'cp1252']
+        csv_content = None
+        for encoding in encodings:
+            try:
+                csv_content = content.decode(encoding)
+                break
+            except UnicodeDecodeError:
+                continue
+
+        if csv_content is None:
+            raise HTTPException(status_code=400, detail="Could not decode CSV file. Please ensure it's saved in UTF-8, Windows-1252, or ISO-8859-1 encoding.")
+
+        # Handle CSV parsing issues with legacy files
+        csv_reader = csv.DictReader(io.StringIO(csv_content), delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

         # Check headers
         csv_headers = csv_reader.fieldnames
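
For the validation path, `DictReader` with explicit `delimiter`, `quotechar`, and `quoting` keeps quoted commas intact (note that `csv.QUOTE_MINIMAL` is already the reader default, so passing it mainly documents intent). A quick check with invented data:

```python
import csv
import io

sample = 'Id,Last,City\n1,"Smith, Jr.",Austin\n'
reader = csv.DictReader(io.StringIO(sample), delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in reader:
    print(row)  # {'Id': '1', 'Last': 'Smith, Jr.', 'City': 'Austin'}
```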
@@ -664,6 +799,9 @@ async def validate_csv_file(
         }
     except Exception as e:
         print(f"VALIDATION ERROR DEBUG: {type(e).__name__}: {str(e)}")
+        import traceback
+        print(f"VALIDATION TRACEBACK: {traceback.format_exc()}")
         raise HTTPException(status_code=500, detail=f"Validation failed: {str(e)}")
@@ -737,8 +875,27 @@ async def batch_import_csv_files(
             field_mapping = FIELD_MAPPINGS.get(file_type, {})

             content = await file.read()
-            csv_content = content.decode('utf-8-sig')
-            csv_reader = csv.DictReader(io.StringIO(csv_content))
+            # Try multiple encodings for legacy CSV files
+            encodings = ['utf-8-sig', 'utf-8', 'windows-1252', 'iso-8859-1', 'cp1252']
+            csv_content = None
+            for encoding in encodings:
+                try:
+                    csv_content = content.decode(encoding)
+                    break
+                except UnicodeDecodeError:
+                    continue
+
+            if csv_content is None:
+                results.append({
+                    "file_type": file_type,
+                    "status": "failed",
+                    "message": "Could not decode CSV file encoding"
+                })
+                continue
+
+            # Handle CSV parsing issues with legacy files
+            csv_reader = csv.DictReader(io.StringIO(csv_content), delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

             imported_count = 0
             errors = []
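
The batch path tries `utf-8-sig` first, which matters: plain `utf-8` decodes a leading BOM into the first header name, while `utf-8-sig` strips it. Demo with fabricated bytes:

```python
content = b'\xef\xbb\xbfId,Last\n1,Smith\n'
print(repr(content.decode('utf-8')[:3]))      # '\ufeffId' -- BOM leaks into "Id"
print(repr(content.decode('utf-8-sig')[:3]))  # 'Id,' -- BOM stripped
```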
@@ -752,7 +909,7 @@ async def batch_import_csv_files(
                 try:
                     model_data = {}
                     for csv_field, db_field in field_mapping.items():
-                        if csv_field in row:
+                        if csv_field in row and db_field is not None:  # Skip fields mapped to None
                             converted_value = convert_value(row[csv_field], csv_field)
                             if converted_value is not None:
                                 model_data[db_field] = converted_value