Compare commits

..

3 Commits

Author SHA1 Message Date
HotSwapp
fdcff9fbb2 Expand encoding fallback to handle more legacy CSV encodings
- Added windows-1252, cp1250, iso-8859-1 to encoding fallback list
- Enhanced error logging in open_text_with_fallbacks function
- Improved error messages to show all attempted encodings
- Added warning logs for each encoding attempt that fails

This should resolve 'charmap' codec errors and other encoding issues with legacy CSV files that use different Windows codepages or ISO encodings.
2025-10-07 22:25:34 -05:00
HotSwapp
09ef56fc1d Apply encoding fallback to all CSV importers (phone, files, ledger, payments, qdros)
- Updated import_phone_data to use open_text_with_fallbacks for encoding support
- Updated import_files_data to use open_text_with_fallbacks for encoding support
- Updated import_ledger_data to use open_text_with_fallbacks for encoding support
- Updated import_qdros_data to use open_text_with_fallbacks for encoding support
- Updated import_payments_data to use open_text_with_fallbacks for encoding support

All CSV import functions now use the same encoding fallback pattern that tries utf-8, utf-8-sig, cp1252, and latin-1 encodings to handle legacy CSV files with different encodings.
2025-10-07 22:21:07 -05:00
HotSwapp
58b2bb9a6c Add stored filename visibility and auto-select functionality to admin upload results
- Added 'Stored Filename' column to Upload Results table showing the actual filename used for storage
- Added 'Select All' button for each import type section to quickly select/deselect all files
- Improved JavaScript to handle select all/deselect all functionality with proper button state management
- Enhanced UI to clearly distinguish between original and stored filenames
2025-10-07 22:15:08 -05:00
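
Taken together, the first two commits funnel every CSV read through one fallback helper. A minimal standalone sketch of that pattern (standard library only; the shortened encoding list and 'sample.csv' are illustrative, not the exact code in the diff below):

import csv

def open_with_fallbacks(path):
    # Try each candidate; reading a small chunk forces a wrong guess to fail fast.
    for enc in ("utf-8", "utf-8-sig", "cp1252", "latin-1"):
        try:
            f = open(path, "r", encoding=enc, newline="")
            f.read(1024)
            f.seek(0)
            return f, enc
        except UnicodeDecodeError:
            f.close()
            continue
    raise RuntimeError(f"Unable to decode {path} with any supported encoding")

f, enc = open_with_fallbacks("sample.csv")  # hypothetical input file
try:
    rows = list(csv.DictReader(f))
finally:
    f.close()

Since latin-1 maps every byte value, the last fallback always decodes; the strict read-ahead mainly separates real UTF-8 from the Windows codepages, and any bad bytes past the first 1024 characters still surface later, at read time.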
2 changed files with 338 additions and 216 deletions

View File

@@ -61,6 +61,34 @@ if not SECRET_KEY:
 # Configure structured logging
 setup_logging()
 logger = structlog.get_logger(__name__)
+
+
+def open_text_with_fallbacks(file_path: str):
+    """
+    Open a text file trying multiple encodings commonly seen in legacy CSVs.
+    Attempts in order: utf-8, utf-8-sig, cp1252, windows-1252, cp1250, iso-8859-1, latin-1.
+    Returns a tuple of (file_object, encoding_used). The caller is responsible for closing the file.
+    """
+    encodings = ["utf-8", "utf-8-sig", "cp1252", "windows-1252", "cp1250", "iso-8859-1", "latin-1"]
+    last_error = None
+    for enc in encodings:
+        try:
+            f = open(file_path, 'r', encoding=enc, errors='strict', newline='')
+            # Try reading a tiny chunk to force decoding errors early
+            _ = f.read(1024)
+            f.seek(0)
+            logger.info("csv_open_encoding_selected", file=file_path, encoding=enc)
+            return f, enc
+        except Exception as e:
+            last_error = e
+            logger.warning("encoding_fallback_failed", file=file_path, encoding=enc, error=str(e))
+            continue
+    error_msg = f"Unable to open file '{file_path}' with any of the supported encodings: {', '.join(encodings)}"
+    if last_error:
+        error_msg += f". Last error: {str(last_error)}"
+    raise RuntimeError(error_msg)
+
+
 # Configure Jinja2 templates
 templates = Jinja2Templates(directory="app/templates")
@@ -218,21 +246,38 @@ def get_import_type_from_filename(filename: str) -> str:
         Import type string (client, phone, case, transaction, document, payment)
     """
     filename_upper = filename.upper()
+
+    # Strip extension and normalize
+    base = filename_upper.rsplit('.', 1)[0]
+
+    # Support files saved with explicit type prefixes (e.g., CLIENT_<uuid>.csv)
-    if filename_upper.startswith('ROLODEX') or filename_upper.startswith('ROLEX'):
+    if base.startswith('CLIENT_'):
         return 'client'
-    elif filename_upper.startswith('PHONE'):
+    if base.startswith('PHONE_'):
         return 'phone'
-    elif filename_upper.startswith('FILES'):
+    if base.startswith('CASE_'):
         return 'case'
-    elif filename_upper.startswith('LEDGER'):
+    if base.startswith('TRANSACTION_'):
         return 'transaction'
-    elif filename_upper.startswith('QDROS') or filename_upper.startswith('QDRO'):
+    if base.startswith('DOCUMENT_'):
         return 'document'
-    elif filename_upper.startswith('PAYMENTS') or filename_upper.startswith('DEPOSITS'):
+    if base.startswith('PAYMENT_'):
         return 'payment'
-    else:
-        raise ValueError(f"Unknown file type for filename: {filename}")
+
+    # Legacy/real file name patterns
+    if base.startswith('ROLODEX') or base.startswith('ROLEX') or 'ROLODEX' in base or 'ROLEX' in base:
+        return 'client'
+    if base.startswith('PHONE') or 'PHONE' in base:
+        return 'phone'
+    if base.startswith('FILES') or base.startswith('FILE') or 'FILES' in base:
+        return 'case'
+    if base.startswith('LEDGER') or 'LEDGER' in base or base.startswith('TRNSACTN') or 'TRNSACTN' in base:
+        return 'transaction'
+    if base.startswith('QDROS') or base.startswith('QDRO') or 'QDRO' in base:
+        return 'document'
+    if base.startswith('PAYMENTS') or base.startswith('DEPOSITS') or 'PAYMENT' in base or 'DEPOSIT' in base:
+        return 'payment'
+
+    raise ValueError(f"Unknown file type for filename: {filename}")

 def validate_csv_headers(headers: List[str], expected_fields: Dict[str, str]) -> Dict[str, Any]:
@@ -370,7 +415,8 @@ def import_rolodex_data(db: Session, file_path: str) -> Dict[str, Any]:
     }

     try:
-        with open(file_path, 'r', encoding='utf-8') as file:
+        f, used_encoding = open_text_with_fallbacks(file_path)
+        with f as file:
             reader = csv.DictReader(file)

             # Validate headers
@@ -418,6 +464,7 @@ def import_rolodex_data(db: Session, file_path: str) -> Dict[str, Any]:
         db.commit()

     except Exception as e:
+        logger.error("rolodex_import_failed", file=file_path, error=str(e))
         result['errors'].append(f"Import failed: {str(e)}")
         db.rollback()
@@ -436,52 +483,56 @@ def import_phone_data(db: Session, file_path: str) -> Dict[str, Any]:
         'total_rows': 0
     }

+    f = None
     try:
-        with open(file_path, 'r', encoding='utf-8') as file:
-            reader = csv.DictReader(file)
+        f, used_encoding = open_text_with_fallbacks(file_path)
+        reader = csv.DictReader(f)

-            headers = reader.fieldnames or []
-            if len(headers) < 2:
-                result['errors'].append("Invalid CSV format: expected at least 2 columns")
-                return result
+        headers = reader.fieldnames or []
+        if len(headers) < 2:
+            result['errors'].append("Invalid CSV format: expected at least 2 columns")
+            return result

-            for row_num, row in enumerate(reader, start=2):
-                result['total_rows'] += 1
+        for row_num, row in enumerate(reader, start=2):
+            result['total_rows'] += 1

-                try:
-                    client_id = row.get('Id', '').strip()
-                    if not client_id:
-                        result['errors'].append(f"Row {row_num}: Missing client ID")
-                        continue
+            try:
+                client_id = row.get('Id', '').strip()
+                if not client_id:
+                    result['errors'].append(f"Row {row_num}: Missing client ID")
+                    continue

-                    # Find the client
-                    client = db.query(Client).filter(Client.rolodex_id == client_id).first()
-                    if not client:
-                        result['errors'].append(f"Row {row_num}: Client with ID '{client_id}' not found")
-                        continue
+                # Find the client
+                client = db.query(Client).filter(Client.rolodex_id == client_id).first()
+                if not client:
+                    result['errors'].append(f"Row {row_num}: Client with ID '{client_id}' not found")
+                    continue

-                    phone_number = row.get('Phone', '').strip()
-                    if not phone_number:
-                        result['errors'].append(f"Row {row_num}: Missing phone number")
-                        continue
+                phone_number = row.get('Phone', '').strip()
+                if not phone_number:
+                    result['errors'].append(f"Row {row_num}: Missing phone number")
+                    continue

-                    phone = Phone(
-                        client_id=client.id,
-                        phone_type=row.get('Location', '').strip() or 'primary',
-                        phone_number=phone_number
-                    )
+                phone = Phone(
+                    client_id=client.id,
+                    phone_type=row.get('Location', '').strip() or 'primary',
+                    phone_number=phone_number
+                )

-                    db.add(phone)
-                    result['success'] += 1
+                db.add(phone)
+                result['success'] += 1

-                except Exception as e:
-                    result['errors'].append(f"Row {row_num}: {str(e)}")
+            except Exception as e:
+                result['errors'].append(f"Row {row_num}: {str(e)}")

         db.commit()

     except Exception as e:
         result['errors'].append(f"Import failed: {str(e)}")
         db.rollback()
+    finally:
+        if f:
+            f.close()

     return result
@@ -530,62 +581,66 @@ def import_files_data(db: Session, file_path: str) -> Dict[str, Any]:
         'Memo': 'Memo'
     }

+    f = None
     try:
-        with open(file_path, 'r', encoding='utf-8') as file:
-            reader = csv.DictReader(file)
+        f, used_encoding = open_text_with_fallbacks(file_path)
+        reader = csv.DictReader(f)

-            headers = reader.fieldnames or []
-            validation = validate_csv_headers(headers, expected_fields)
+        headers = reader.fieldnames or []
+        validation = validate_csv_headers(headers, expected_fields)

-            if not validation['valid']:
-                result['errors'].append(f"Header validation failed: {validation['errors']}")
-                return result
+        if not validation['valid']:
+            result['errors'].append(f"Header validation failed: {validation['errors']}")
+            return result

-            for row_num, row in enumerate(reader, start=2):
-                result['total_rows'] += 1
+        for row_num, row in enumerate(reader, start=2):
+            result['total_rows'] += 1

-                try:
-                    file_no = row.get('File_No', '').strip()
-                    if not file_no:
-                        result['errors'].append(f"Row {row_num}: Missing file number")
-                        continue
+            try:
+                file_no = row.get('File_No', '').strip()
+                if not file_no:
+                    result['errors'].append(f"Row {row_num}: Missing file number")
+                    continue

-                    # Check for existing case
-                    existing = db.query(Case).filter(Case.file_no == file_no).first()
-                    if existing:
-                        result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' already exists")
-                        continue
+                # Check for existing case
+                existing = db.query(Case).filter(Case.file_no == file_no).first()
+                if existing:
+                    result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' already exists")
+                    continue

-                    # Find client by ID
-                    client_id = row.get('Id', '').strip()
-                    client = None
-                    if client_id:
-                        client = db.query(Client).filter(Client.rolodex_id == client_id).first()
-                        if not client:
-                            result['errors'].append(f"Row {row_num}: Client with ID '{client_id}' not found")
-                            continue
+                # Find client by ID
+                client_id = row.get('Id', '').strip()
+                client = None
+                if client_id:
+                    client = db.query(Client).filter(Client.rolodex_id == client_id).first()
+                    if not client:
+                        result['errors'].append(f"Row {row_num}: Client with ID '{client_id}' not found")
+                        continue

-                    case = Case(
-                        file_no=file_no,
-                        client_id=client.id if client else None,
-                        status=row.get('Status', '').strip() or 'active',
-                        case_type=row.get('File_Type', '').strip() or None,
-                        description=row.get('Regarding', '').strip() or None,
-                        open_date=parse_date(row.get('Opened', '')),
-                        close_date=parse_date(row.get('Closed', ''))
-                    )
+                case = Case(
+                    file_no=file_no,
+                    client_id=client.id if client else None,
+                    status=row.get('Status', '').strip() or 'active',
+                    case_type=row.get('File_Type', '').strip() or None,
+                    description=row.get('Regarding', '').strip() or None,
+                    open_date=parse_date(row.get('Opened', '')),
+                    close_date=parse_date(row.get('Closed', ''))
+                )

-                    db.add(case)
-                    result['success'] += 1
+                db.add(case)
+                result['success'] += 1

-                except Exception as e:
-                    result['errors'].append(f"Row {row_num}: {str(e)}")
+            except Exception as e:
+                result['errors'].append(f"Row {row_num}: {str(e)}")

         db.commit()

     except Exception as e:
         result['errors'].append(f"Import failed: {str(e)}")
         db.rollback()
+    finally:
+        if f:
+            f.close()

     return result
@@ -602,81 +657,85 @@ def import_ledger_data(db: Session, file_path: str) -> Dict[str, Any]:
         'total_rows': 0
     }

+    f = None
     try:
-        with open(file_path, 'r', encoding='utf-8') as file:
-            reader = csv.DictReader(file)
+        f, used_encoding = open_text_with_fallbacks(file_path)
+        reader = csv.DictReader(f)

-            headers = reader.fieldnames or []
-            if len(headers) < 3:
-                result['errors'].append("Invalid CSV format: expected at least 3 columns")
-                return result
+        headers = reader.fieldnames or []
+        if len(headers) < 3:
+            result['errors'].append("Invalid CSV format: expected at least 3 columns")
+            return result

-            for row_num, row in enumerate(reader, start=2):
-                result['total_rows'] += 1
+        for row_num, row in enumerate(reader, start=2):
+            result['total_rows'] += 1

-                try:
-                    file_no = row.get('File_No', '').strip()
-                    if not file_no:
-                        result['errors'].append(f"Row {row_num}: Missing file number")
-                        continue
+            try:
+                file_no = row.get('File_No', '').strip()
+                if not file_no:
+                    result['errors'].append(f"Row {row_num}: Missing file number")
+                    continue

-                    # Find the case
-                    case = db.query(Case).filter(Case.file_no == file_no).first()
-                    if not case:
-                        result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' not found")
-                        continue
+                # Find the case
+                case = db.query(Case).filter(Case.file_no == file_no).first()
+                if not case:
+                    result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' not found")
+                    continue

-                    amount = parse_float(row.get('Amount', '0'))
-                    if amount is None:
-                        result['errors'].append(f"Row {row_num}: Invalid amount")
-                        continue
+                amount = parse_float(row.get('Amount', '0'))
+                if amount is None:
+                    result['errors'].append(f"Row {row_num}: Invalid amount")
+                    continue

-                    tx_date = parse_date(row.get('Date', ''))
-                    item_no = parse_int(row.get('Item_No', '') or '')
-                    # ensure unique item_no per date by increment
-                    # temp session-less check via while loop
-                    desired_item_no = item_no if item_no is not None else 1
-                    while True:
-                        exists = (
-                            db.query(Transaction)
-                            .filter(
-                                Transaction.case_id == case.id,
-                                Transaction.transaction_date == tx_date,
-                                Transaction.item_no == desired_item_no,
-                            )
-                            .first()
-                        )
-                        if not exists:
-                            break
-                        desired_item_no += 1
+                tx_date = parse_date(row.get('Date', ''))
+                item_no = parse_int(row.get('Item_No', '') or '')
+                # ensure unique item_no per date by increment
+                # temp session-less check via while loop
+                desired_item_no = item_no if item_no is not None else 1
+                while True:
+                    exists = (
+                        db.query(Transaction)
+                        .filter(
+                            Transaction.case_id == case.id,
+                            Transaction.transaction_date == tx_date,
+                            Transaction.item_no == desired_item_no,
+                        )
+                        .first()
+                    )
+                    if not exists:
+                        break
+                    desired_item_no += 1

-                    transaction = Transaction(
-                        case_id=case.id,
-                        transaction_date=tx_date,
-                        transaction_type=(row.get('T_Type', '').strip() or None),
-                        t_type_l=(row.get('T_Type_L', '').strip().upper() or None),
-                        amount=amount,
-                        description=(row.get('Note', '').strip() or None),
-                        reference=(row.get('Item_No', '').strip() or None),
-                        item_no=desired_item_no,
-                        employee_number=(row.get('Empl_Num', '').strip() or None),
-                        t_code=(row.get('T_Code', '').strip().upper() or None),
-                        quantity=parse_float(row.get('Quantity', '')),
-                        rate=parse_float(row.get('Rate', '')),
-                        billed=((row.get('Billed', '') or '').strip().upper() or None),
-                    )
+                transaction = Transaction(
+                    case_id=case.id,
+                    transaction_date=tx_date,
+                    transaction_type=(row.get('T_Type', '').strip() or None),
+                    t_type_l=(row.get('T_Type_L', '').strip().upper() or None),
+                    amount=amount,
+                    description=(row.get('Note', '').strip() or None),
+                    reference=(row.get('Item_No', '').strip() or None),
+                    item_no=desired_item_no,
+                    employee_number=(row.get('Empl_Num', '').strip() or None),
+                    t_code=(row.get('T_Code', '').strip().upper() or None),
+                    quantity=parse_float(row.get('Quantity', '')),
+                    rate=parse_float(row.get('Rate', '')),
+                    billed=((row.get('Billed', '') or '').strip().upper() or None),
+                )

-                    db.add(transaction)
-                    result['success'] += 1
+                db.add(transaction)
+                result['success'] += 1

-                except Exception as e:
-                    result['errors'].append(f"Row {row_num}: {str(e)}")
+            except Exception as e:
+                result['errors'].append(f"Row {row_num}: {str(e)}")

         db.commit()

     except Exception as e:
         result['errors'].append(f"Import failed: {str(e)}")
         db.rollback()
+    finally:
+        if f:
+            f.close()

     return result
@@ -693,49 +752,53 @@ def import_qdros_data(db: Session, file_path: str) -> Dict[str, Any]:
         'total_rows': 0
     }

+    f = None
     try:
-        with open(file_path, 'r', encoding='utf-8') as file:
-            reader = csv.DictReader(file)
+        f, used_encoding = open_text_with_fallbacks(file_path)
+        reader = csv.DictReader(f)

-            headers = reader.fieldnames or []
-            if len(headers) < 2:
-                result['errors'].append("Invalid CSV format: expected at least 2 columns")
-                return result
+        headers = reader.fieldnames or []
+        if len(headers) < 2:
+            result['errors'].append("Invalid CSV format: expected at least 2 columns")
+            return result

-            for row_num, row in enumerate(reader, start=2):
-                result['total_rows'] += 1
+        for row_num, row in enumerate(reader, start=2):
+            result['total_rows'] += 1

-                try:
-                    file_no = row.get('File_No', '').strip()
-                    if not file_no:
-                        result['errors'].append(f"Row {row_num}: Missing file number")
-                        continue
+            try:
+                file_no = row.get('File_No', '').strip()
+                if not file_no:
+                    result['errors'].append(f"Row {row_num}: Missing file number")
+                    continue

-                    # Find the case
-                    case = db.query(Case).filter(Case.file_no == file_no).first()
-                    if not case:
-                        result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' not found")
-                        continue
+                # Find the case
+                case = db.query(Case).filter(Case.file_no == file_no).first()
+                if not case:
+                    result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' not found")
+                    continue

-                    document = Document(
-                        case_id=case.id,
-                        document_type=row.get('Document_Type', '').strip() or 'QDRO',
-                        file_name=row.get('File_Name', '').strip() or None,
-                        description=row.get('Description', '').strip() or None,
-                        uploaded_date=parse_date(row.get('Date', ''))
-                    )
+                document = Document(
+                    case_id=case.id,
+                    document_type=row.get('Document_Type', '').strip() or 'QDRO',
+                    file_name=row.get('File_Name', '').strip() or None,
+                    description=row.get('Description', '').strip() or None,
+                    uploaded_date=parse_date(row.get('Date', ''))
+                )

-                    db.add(document)
-                    result['success'] += 1
+                db.add(document)
+                result['success'] += 1

-                except Exception as e:
-                    result['errors'].append(f"Row {row_num}: {str(e)}")
+            except Exception as e:
+                result['errors'].append(f"Row {row_num}: {str(e)}")

         db.commit()

     except Exception as e:
         result['errors'].append(f"Import failed: {str(e)}")
         db.rollback()
+    finally:
+        if f:
+            f.close()

     return result
@@ -752,55 +815,59 @@ def import_payments_data(db: Session, file_path: str) -> Dict[str, Any]:
         'total_rows': 0
     }

+    f = None
     try:
-        with open(file_path, 'r', encoding='utf-8') as file:
-            reader = csv.DictReader(file)
+        f, used_encoding = open_text_with_fallbacks(file_path)
+        reader = csv.DictReader(f)

-            headers = reader.fieldnames or []
-            if len(headers) < 2:
-                result['errors'].append("Invalid CSV format: expected at least 2 columns")
-                return result
+        headers = reader.fieldnames or []
+        if len(headers) < 2:
+            result['errors'].append("Invalid CSV format: expected at least 2 columns")
+            return result

-            for row_num, row in enumerate(reader, start=2):
-                result['total_rows'] += 1
+        for row_num, row in enumerate(reader, start=2):
+            result['total_rows'] += 1

-                try:
-                    file_no = row.get('File_No', '').strip()
-                    if not file_no:
-                        result['errors'].append(f"Row {row_num}: Missing file number")
-                        continue
+            try:
+                file_no = row.get('File_No', '').strip()
+                if not file_no:
+                    result['errors'].append(f"Row {row_num}: Missing file number")
+                    continue

-                    # Find the case
-                    case = db.query(Case).filter(Case.file_no == file_no).first()
-                    if not case:
-                        result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' not found")
-                        continue
+                # Find the case
+                case = db.query(Case).filter(Case.file_no == file_no).first()
+                if not case:
+                    result['errors'].append(f"Row {row_num}: Case with file number '{file_no}' not found")
+                    continue

-                    amount = parse_float(row.get('Amount', '0'))
-                    if amount is None:
-                        result['errors'].append(f"Row {row_num}: Invalid amount")
-                        continue
+                amount = parse_float(row.get('Amount', '0'))
+                if amount is None:
+                    result['errors'].append(f"Row {row_num}: Invalid amount")
+                    continue

-                    payment = Payment(
-                        case_id=case.id,
-                        payment_date=parse_date(row.get('Date', '')),
-                        payment_type=row.get('Type', '').strip() or None,
-                        amount=amount,
-                        description=row.get('Description', '').strip() or None,
-                        check_number=row.get('Check_Number', '').strip() or None
-                    )
+                payment = Payment(
+                    case_id=case.id,
+                    payment_date=parse_date(row.get('Date', '')),
+                    payment_type=row.get('Type', '').strip() or None,
+                    amount=amount,
+                    description=row.get('Description', '').strip() or None,
+                    check_number=row.get('Check_Number', '').strip() or None
+                )

-                    db.add(payment)
-                    result['success'] += 1
+                db.add(payment)
+                result['success'] += 1

-                except Exception as e:
-                    result['errors'].append(f"Row {row_num}: {str(e)}")
+            except Exception as e:
+                result['errors'].append(f"Row {row_num}: {str(e)}")

         db.commit()

     except Exception as e:
         result['errors'].append(f"Import failed: {str(e)}")
         db.rollback()
+    finally:
+        if f:
+            f.close()

     return result
@@ -1435,7 +1502,14 @@ async def admin_upload_files(
         # Generate unique filename to avoid conflicts
         file_id = str(uuid.uuid4())
         file_ext = os.path.splitext(file.filename)[1]
-        stored_filename = f"{file_id}{file_ext}"
+        # Determine import type from original filename for better categorization later
+        try:
+            detected_type = get_import_type_from_filename(file.filename)
+        except ValueError:
+            detected_type = 'unknown'
+
+        # Prefix stored filename with detected type to preserve context
+        stored_filename = f"{detected_type}_{file_id}{file_ext}"
         file_path = os.path.join(import_dir, stored_filename)

         # Save file
@@ -1443,14 +1517,8 @@ async def admin_upload_files(
         with open(file_path, "wb") as f:
             f.write(contents)

-        # Determine import type from filename
-        try:
-            import_type = get_import_type_from_filename(file.filename)
-        except ValueError as e:
-            errors.append(f"File '{file.filename}': {str(e)}")
-            # Clean up uploaded file
-            os.remove(file_path)
-            continue
+        # Use detected type (already derived from original name)
+        import_type = detected_type

         results.append({
             'filename': file.filename,
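
For reference, the reworked detection above boils down to a two-tier lookup: explicit TYPE_ prefixes written at upload time win, then legacy name patterns match as substrings. A condensed, table-driven sketch of that behavior (an assumption-level restatement, not the committed if-chain):

def detect_import_type(filename: str) -> str:
    base = filename.upper().rsplit('.', 1)[0]
    # Tier 1: explicit prefixes, e.g. CLIENT_<uuid>.csv written by the uploader
    for prefix, kind in (("CLIENT_", "client"), ("PHONE_", "phone"),
                         ("CASE_", "case"), ("TRANSACTION_", "transaction"),
                         ("DOCUMENT_", "document"), ("PAYMENT_", "payment")):
        if base.startswith(prefix):
            return kind
    # Tier 2: legacy substring patterns from the original export names
    for token, kind in (("ROLODEX", "client"), ("ROLEX", "client"),
                        ("PHONE", "phone"), ("FILES", "case"),
                        ("LEDGER", "transaction"), ("TRNSACTN", "transaction"),
                        ("QDRO", "document"), ("PAYMENT", "payment"),
                        ("DEPOSIT", "payment")):
        if token in base:
            return kind
    raise ValueError(f"Unknown file type for filename: {filename}")

assert detect_import_type("transaction_1f2e3d.csv") == "transaction"
assert detect_import_type("LEDGER99.CSV") == "transaction"

Because tier 2 is substring matching, check order decides ties: a name containing both PHONE and LEDGER resolves to whichever pattern is tried first, which mirrors the committed if-chain.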

View File

@@ -75,6 +75,7 @@
                 <thead>
                     <tr>
                         <th>Original Filename</th>
+                        <th>Stored Filename</th>
                         <th>Import Type</th>
                         <th>Size</th>
                         <th>Status</th>
@@ -83,7 +84,16 @@
                 <tbody>
                     {% for result in upload_results %}
                     <tr>
-                        <td>{{ result.filename }}</td>
+                        <td>
+                            <strong>{{ result.filename }}</strong>
+                            <br>
+                            <small class="text-muted">Original name</small>
+                        </td>
+                        <td>
+                            <code class="small">{{ result.stored_filename }}</code>
+                            <br>
+                            <small class="text-muted">Stored as</small>
+                        </td>
                         <td>
                             <span class="badge bg-primary">{{ result.import_type }}</span>
                         </td>
@@ -148,12 +158,18 @@
             <div class="card-body">
                 <form action="/admin/import/{{ import_type }}" method="post">
                     <div class="mb-3">
-                        <label class="form-label">Available Files:</label>
+                        <div class="d-flex justify-content-between align-items-center mb-2">
+                            <label class="form-label mb-0">Available Files:</label>
+                            <button type="button" class="btn btn-outline-primary btn-sm select-all-btn"
+                                    data-import-type="{{ import_type }}">
+                                <i class="bi bi-check-all me-1"></i>Select All
+                            </button>
+                        </div>
                         <div class="list-group">
                             {% for file in files %}
                             <label class="list-group-item d-flex justify-content-between align-items-center">
                                 <div>
-                                    <input class="form-check-input me-2" type="checkbox"
+                                    <input class="form-check-input me-2 file-checkbox" type="checkbox"
                                            name="selected_files" value="{{ file.filename }}" id="{{ file.filename }}">
                                     <small class="text-muted">{{ file.filename }}</small>
                                     <br>
@@ -350,16 +366,54 @@ document.addEventListener('DOMContentLoaded', function() {
     // Start refresh cycle if there are running imports
     refreshRunningImports();

+    // Select All functionality
+    document.querySelectorAll('.select-all-btn').forEach(button => {
+        button.addEventListener('click', function() {
+            const importType = this.getAttribute('data-import-type');
+            const form = this.closest('form');
+            const checkboxes = form.querySelectorAll('.file-checkbox');
+            const submitBtn = form.querySelector('button[type="submit"]');
+
+            // Toggle all checkboxes in this form
+            const allChecked = Array.from(checkboxes).every(cb => cb.checked);
+            checkboxes.forEach(checkbox => {
+                checkbox.checked = !allChecked;
+            });
+
+            // Update button text
+            this.innerHTML = allChecked ?
+                '<i class="bi bi-check-all me-1"></i>Select All' :
+                '<i class="bi bi-dash-square me-1"></i>Deselect All';
+
+            // Update submit button state
+            const hasSelection = Array.from(checkboxes).some(cb => cb.checked);
+            submitBtn.disabled = !hasSelection;
+        });
+    });
+
     // File selection helpers
-    document.querySelectorAll('input[type="checkbox"]').forEach(checkbox => {
+    document.querySelectorAll('.file-checkbox').forEach(checkbox => {
         checkbox.addEventListener('change', function() {
             const form = this.closest('form');
-            const checkboxes = form.querySelectorAll('input[name="selected_files"]');
+            const checkboxes = form.querySelectorAll('.file-checkbox');
             const submitBtn = form.querySelector('button[type="submit"]');
+            const selectAllBtn = form.querySelector('.select-all-btn');

             // Enable/disable submit button based on selection
             const hasSelection = Array.from(checkboxes).some(cb => cb.checked);
             submitBtn.disabled = !hasSelection;
+
+            // Update select all button state
+            const allChecked = Array.from(checkboxes).every(cb => cb.checked);
+            const noneChecked = Array.from(checkboxes).every(cb => !cb.checked);
+            if (allChecked) {
+                selectAllBtn.innerHTML = '<i class="bi bi-dash-square me-1"></i>Deselect All';
+            } else if (noneChecked) {
+                selectAllBtn.innerHTML = '<i class="bi bi-check-all me-1"></i>Select All';
+            } else {
+                selectAllBtn.innerHTML = '<i class="bi bi-check-square me-1"></i>Select All';
+            }
         });
     });
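
The new table columns line up with the per-file result dict the upload route appends. A hedged sketch of one upload_results entry as the template consumes it (the size and status keys are inferred from the column headers, not shown in this diff):

upload_result = {
    "filename": "LEDGER.CSV",                     # original name (bold cell)
    "stored_filename": "transaction_1f2e3d.csv",  # type-prefixed UUID name
    "import_type": "transaction",                 # rendered as a badge
    "size": 48213,                                # assumed: Size column
    "status": "uploaded",                         # assumed: Status column
}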