Fix upload detection for model class names and add States/Printers/Setup import
- Enhanced get_import_type_from_filename() to recognize model class names (LegacyFile, FilesR, etc.) in addition to legacy CSV names
- Added import functions for the States, Printers, and Setup reference tables
- Updated VALID_IMPORT_TYPES and IMPORT_ORDER to include the new tables
- Updated the admin panel table counts to display the new reference tables
- Created UPLOAD_FIX.md documenting the changes and how to handle existing unknown files

This fixes the issue where files uploaded with model class names (e.g., LegacyFile.csv) were categorized as 'unknown' instead of being detected properly.
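For illustration, the intended behavior after this change, inferred from the detection rules in the diff below (module path hedged; see `app/main.py`):

```python
# Expected detections after this fix (illustrative, not exhaustive).
from app.main import get_import_type_from_filename  # module path per this commit

assert get_import_type_from_filename("FILES.csv") == "files"        # legacy CSV name
assert get_import_type_from_filename("LegacyFile.csv") == "files"   # model class name
assert get_import_type_from_filename("FilesR.csv") == "files_r"     # model class name
assert get_import_type_from_filename("States.csv") == "states"      # newly supported table
```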
app/import_legacy.py
@@ -20,7 +20,7 @@ from .models import (
    Footers, FileStat, Employee, GroupLkup, FileType,
    Qdros, PlanInfo, Pensions, PensionMarriage, PensionDeath,
    PensionSchedule, PensionSeparate, PensionResults,
-   RolexV, FVarLkup, RVarLkup
+   RolexV, FVarLkup, RVarLkup, States, Printers, Setup
)

logger = structlog.get_logger(__name__)
@@ -660,6 +660,221 @@ def import_rvarlkup(db: Session, file_path: str) -> Dict[str, Any]:
    return result


def import_states(db: Session, file_path: str) -> Dict[str, Any]:
    """Import STATES.csv → States model with upsert logic."""
    result = {'success': 0, 'errors': [], 'total_rows': 0, 'updated': 0, 'inserted': 0}

    try:
        f, encoding = open_text_with_fallbacks(file_path)
        reader = csv.DictReader(f)

        for row_num, row in enumerate(reader, start=2):
            result['total_rows'] += 1

            try:
                abrev = clean_string(row.get('Abrev'))
                if not abrev:
                    continue

                # Check if record already exists
                existing = db.query(States).filter(States.abrev == abrev).first()

                if existing:
                    # Update existing record
                    existing.st = clean_string(row.get('St'))
                    result['updated'] += 1
                else:
                    # Insert new record
                    record = States(
                        abrev=abrev,
                        st=clean_string(row.get('St'))
                    )
                    db.add(record)
                    result['inserted'] += 1

                result['success'] += 1

                # Commit in batches for performance
                if result['success'] % BATCH_SIZE == 0:
                    db.commit()

            except Exception as e:
                result['errors'].append(f"Row {row_num}: {str(e)}")
                db.rollback()

        # Commit any remaining changes
        db.commit()

        f.close()
        logger.info("import_states_complete", **result)

    except Exception as e:
        db.rollback()
        result['errors'].append(f"Fatal error: {str(e)}")
        logger.error("import_states_failed", error=str(e))

    return result


def import_printers(db: Session, file_path: str) -> Dict[str, Any]:
    """Import PRINTERS.csv → Printers model with upsert logic."""
    result = {'success': 0, 'errors': [], 'total_rows': 0, 'updated': 0, 'inserted': 0}

    try:
        f, encoding = open_text_with_fallbacks(file_path)
        reader = csv.DictReader(f)

        for row_num, row in enumerate(reader, start=2):
            result['total_rows'] += 1

            try:
                number_str = clean_string(row.get('Number'))
                if not number_str:
                    continue

                try:
                    number = int(number_str)
                except ValueError:
                    result['errors'].append(f"Row {row_num}: Invalid Number '{number_str}'")
                    continue

                # Check if record already exists
                existing = db.query(Printers).filter(Printers.number == number).first()

                if existing:
                    # Update existing record
                    existing.name = clean_string(row.get('Name'))
                    existing.port = clean_string(row.get('Port'))
                    existing.page_break = clean_string(row.get('Page_Break'))
                    existing.setup_st = clean_string(row.get('Setup_St'))
                    existing.phone_book = clean_string(row.get('Phone_Book'))
                    existing.rolodex_info = clean_string(row.get('Rolodex_Info'))
                    existing.envelope = clean_string(row.get('Envelope'))
                    existing.file_cabinet = clean_string(row.get('File_Cabinet'))
                    existing.accounts = clean_string(row.get('Accounts'))
                    existing.statements = clean_string(row.get('Statements'))
                    existing.calendar = clean_string(row.get('Calendar'))
                    existing.reset_st = clean_string(row.get('Reset_St'))
                    existing.b_underline = clean_string(row.get('B_Underline'))
                    existing.e_underline = clean_string(row.get('E_Underline'))
                    existing.b_bold = clean_string(row.get('B_Bold'))
                    existing.e_bold = clean_string(row.get('E_Bold'))
                    result['updated'] += 1
                else:
                    # Insert new record
                    record = Printers(
                        number=number,
                        name=clean_string(row.get('Name')),
                        port=clean_string(row.get('Port')),
                        page_break=clean_string(row.get('Page_Break')),
                        setup_st=clean_string(row.get('Setup_St')),
                        phone_book=clean_string(row.get('Phone_Book')),
                        rolodex_info=clean_string(row.get('Rolodex_Info')),
                        envelope=clean_string(row.get('Envelope')),
                        file_cabinet=clean_string(row.get('File_Cabinet')),
                        accounts=clean_string(row.get('Accounts')),
                        statements=clean_string(row.get('Statements')),
                        calendar=clean_string(row.get('Calendar')),
                        reset_st=clean_string(row.get('Reset_St')),
                        b_underline=clean_string(row.get('B_Underline')),
                        e_underline=clean_string(row.get('E_Underline')),
                        b_bold=clean_string(row.get('B_Bold')),
                        e_bold=clean_string(row.get('E_Bold'))
                    )
                    db.add(record)
                    result['inserted'] += 1

                result['success'] += 1

                # Commit in batches for performance
                if result['success'] % BATCH_SIZE == 0:
                    db.commit()

            except Exception as e:
                result['errors'].append(f"Row {row_num}: {str(e)}")
                db.rollback()

        # Commit any remaining changes
        db.commit()

        f.close()
        logger.info("import_printers_complete", **result)

    except Exception as e:
        db.rollback()
        result['errors'].append(f"Fatal error: {str(e)}")
        logger.error("import_printers_failed", error=str(e))

    return result


def import_setup(db: Session, file_path: str) -> Dict[str, Any]:
    """Import SETUP.csv → Setup model (clears and re-inserts)."""
    result = {'success': 0, 'errors': [], 'total_rows': 0}

    try:
        # Clear existing setup records (typically only one row in legacy system)
        db.query(Setup).delete()
        db.commit()

        f, encoding = open_text_with_fallbacks(file_path)
        reader = csv.DictReader(f)

        batch = []
        for row_num, row in enumerate(reader, start=2):
            result['total_rows'] += 1

            try:
                # Parse default_printer as integer if present
                default_printer = None
                default_printer_str = clean_string(row.get('Default_Printer'))
                if default_printer_str:
                    try:
                        default_printer = int(default_printer_str)
                    except ValueError:
                        result['errors'].append(f"Row {row_num}: Invalid Default_Printer '{default_printer_str}'")

                record = Setup(
                    appl_title=clean_string(row.get('Appl_Title')),
                    l_head1=clean_string(row.get('L_Head1')),
                    l_head2=clean_string(row.get('L_Head2')),
                    l_head3=clean_string(row.get('L_Head3')),
                    l_head4=clean_string(row.get('L_Head4')),
                    l_head5=clean_string(row.get('L_Head5')),
                    l_head6=clean_string(row.get('L_Head6')),
                    l_head7=clean_string(row.get('L_Head7')),
                    l_head8=clean_string(row.get('L_Head8')),
                    l_head9=clean_string(row.get('L_Head9')),
                    l_head10=clean_string(row.get('L_Head10')),
                    default_printer=default_printer
                )
                batch.append(record)

                if len(batch) >= BATCH_SIZE:
                    db.bulk_save_objects(batch)
                    db.commit()
                    result['success'] += len(batch)
                    batch = []

            except Exception as e:
                result['errors'].append(f"Row {row_num}: {str(e)}")

        if batch:
            db.bulk_save_objects(batch)
            db.commit()
            result['success'] += len(batch)

        f.close()
        logger.info("import_setup_complete", **result)

    except Exception as e:
        db.rollback()
        result['errors'].append(f"Fatal error: {str(e)}")
        logger.error("import_setup_failed", error=str(e))

    return result


# ============================================================================
# Core Data Table Imports
# ============================================================================
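For orientation, a minimal usage sketch for the new importers. `SessionLocal` and its module path are hypothetical (not shown in this commit); in the app itself these functions are dispatched via `process_csv_import()` in `app/main.py`, wired up below.

```python
# Minimal sketch, assuming a SQLAlchemy session factory named SessionLocal
# exists in app.database (name and path hypothetical).
from app.database import SessionLocal
from app.import_legacy import import_states

db = SessionLocal()
try:
    result = import_states(db, "data-import/STATES.csv")
    print(f"{result['inserted']} inserted, {result['updated']} updated, "
          f"{len(result['errors'])} errors")
finally:
    db.close()
```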
app/main.py
@@ -246,7 +246,7 @@ app.mount("/static", StaticFiles(directory="static"), name="static")
VALID_IMPORT_TYPES: List[str] = [
    # Reference tables
    'trnstype', 'trnslkup', 'footers', 'filestat', 'employee',
-   'gruplkup', 'filetype', 'fvarlkup', 'rvarlkup',
+   'gruplkup', 'filetype', 'fvarlkup', 'rvarlkup', 'states', 'printers', 'setup',
    # Core data tables
    'rolodex', 'phone', 'rolex_v', 'files', 'files_r', 'files_v',
    'filenots', 'ledger', 'deposits', 'payments',
@@ -259,11 +259,11 @@ VALID_IMPORT_TYPES: List[str] = [
# Centralized import order for auto-import after upload
# Reference tables first, then core tables, then specialized tables
IMPORT_ORDER: List[str] = [
-   # Reference tables
-   'trnstype', 'trnslkup', 'footers', 'filestat', 'employee', 'gruplkup', 'filetype', 'fvarlkup', 'rvarlkup',
-   # Core tables
+   # Reference tables - import these first
+   'trnstype', 'trnslkup', 'footers', 'filestat', 'employee', 'gruplkup', 'filetype', 'fvarlkup', 'rvarlkup', 'states', 'printers', 'setup',
+   # Core tables - import after reference tables
    'rolodex', 'phone', 'rolex_v', 'files', 'files_r', 'files_v', 'filenots', 'ledger', 'deposits', 'payments',
-   # Specialized tables
+   # Specialized tables - import last
    'planinfo', 'qdros', 'pensions', 'pension_marriage', 'pension_death', 'pension_schedule', 'pension_separate', 'pension_results',
]
ORDER_INDEX: Dict[str, int] = {t: i for i, t in enumerate(IMPORT_ORDER)}
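As a hedged illustration of how `ORDER_INDEX` supports that ordering (the actual auto-import loop is not shown in this hunk; this assumes the two definitions above are in scope):

```python
# Sorting detected import types so reference tables run first (illustrative).
pending = ['ledger', 'states', 'files', 'trnstype']
pending.sort(key=lambda t: ORDER_INDEX.get(t, len(IMPORT_ORDER)))
print(pending)  # -> ['trnstype', 'states', 'files', 'ledger']
```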
@@ -272,6 +272,9 @@ ORDER_INDEX: Dict[str, int] = {t: i for i, t in enumerate(IMPORT_ORDER)}
def get_import_type_from_filename(filename: str) -> str:
    """
    Determine import type based on filename pattern for legacy CSV files.

+   Supports both legacy CSV naming (e.g., FILES.csv, LEDGER.csv) and
+   model class naming (e.g., LegacyFile.csv, Ledger.csv).
+
    Args:
        filename: Name of the uploaded CSV file
@@ -294,7 +297,7 @@ def get_import_type_from_filename(filename: str) -> str:
        return 'filestat'
    if 'EMPLOYEE' in base:
        return 'employee'
-   if 'GRUPLKUP' in base or 'GROUPLKUP' in base:
+   if 'GRUPLKUP' in base or 'GROUPLKUP' in base or base == 'GROUPLKUP':
        return 'gruplkup'
    if 'FILETYPE' in base:
        return 'filetype'
@@ -302,27 +305,53 @@ def get_import_type_from_filename(filename: str) -> str:
        return 'fvarlkup'
    if 'RVARLKUP' in base:
        return 'rvarlkup'
+   if 'STATES' in base or base == 'STATES':
+       return 'states'
+   if 'PRINTERS' in base or base == 'PRINTERS':
+       return 'printers'
+   if 'SETUP' in base or base == 'SETUP':
+       return 'setup'

-   # Core data tables
+   # Core data tables - check most specific patterns first
+   # Check for ROLEX_V and ROLEXV before ROLEX
    if 'ROLEX_V' in base or 'ROLEXV' in base:
        return 'rolex_v'
-   if 'ROLODEX' in base or 'ROLEX' in base:
-       return 'rolodex'

+   # Check for FILES_R, FILES_V, FILENOTS before generic FILES
    if 'FILES_R' in base or 'FILESR' in base:
        return 'files_r'
    if 'FILES_V' in base or 'FILESV' in base:
        return 'files_v'
-   if 'FILENOTS' in base or 'FILE_NOTS' in base:
+   if 'FILENOTS' in base or 'FILE_NOTS' in base or base == 'FILENOTS':
        return 'filenots'
-   if 'FILES' in base or 'FILE' in base:
-       return 'files'
-   if 'PHONE' in base:
-       return 'phone'

+   # Check for model class name "LEGACYFILE" before generic "FILE"
+   if base == 'LEGACYFILE':
+       return 'files'
+   if 'FILES' in base:
+       return 'files'
+   # Only match generic "FILE" if it's the exact base name or starts with it
+   if base == 'FILE' or base.startswith('FILE_'):
+       return 'files'

+   # ROLODEX variations
+   if 'ROLODEX' in base or base == 'ROLEX':
+       return 'rolodex'

+   # PHONE variations (including model class name LEGACYPHONE)
+   if 'PHONE' in base or base == 'LEGACYPHONE':
+       return 'phone'

+   # LEDGER
    if 'LEDGER' in base:
        return 'ledger'

+   # DEPOSITS
    if 'DEPOSITS' in base or 'DEPOSIT' in base:
        return 'deposits'
-   if 'PAYMENTS' in base or 'PAYMENT' in base:
+
+   # PAYMENTS (including model class name LEGACYPAYMENT)
+   if 'PAYMENTS' in base or 'PAYMENT' in base or base == 'LEGACYPAYMENT':
        return 'payments'

    # Specialized tables
@@ -330,17 +359,23 @@ def get_import_type_from_filename(filename: str) -> str:
        return 'planinfo'
    if 'QDROS' in base or 'QDRO' in base:
        return 'qdros'
-   if 'MARRIAGE' in base:
+
+   # Pension sub-tables - check most specific first
+   if 'MARRIAGE' in base or base == 'PENSIONMARRIAGE':
        return 'pension_marriage'
-   if 'DEATH' in base:
+   if 'DEATH' in base or base == 'PENSIONDEATH':
        return 'pension_death'
-   if 'SCHEDULE' in base:
+   if 'SCHEDULE' in base or base == 'PENSIONSCHEDULE':
        return 'pension_schedule'
-   if 'SEPARATE' in base:
+   if 'SEPARATE' in base or base == 'PENSIONSEPARATE':
        return 'pension_separate'
-   if 'RESULTS' in base:
+   if 'RESULTS' in base or base == 'PENSIONRESULTS':
        return 'pension_results'
-   if 'PENSIONS' in base or 'PENSION' in base:
+
+   # Generic PENSIONS - check last after specific pension tables
+   if 'PENSIONS' in base or base == 'PENSIONS':
        return 'pensions'
+   if base == 'PENSION':
+       return 'pensions'

    raise ValueError(f"Unknown file type for filename: {filename}")
@@ -1050,6 +1085,9 @@ def process_csv_import(db: Session, import_type: str, file_path: str) -> Dict[str, Any]:
        'filetype': import_legacy.import_filetype,
        'fvarlkup': import_legacy.import_fvarlkup,
        'rvarlkup': import_legacy.import_rvarlkup,
+       'states': import_legacy.import_states,
+       'printers': import_legacy.import_printers,
+       'setup': import_legacy.import_setup,

        # Core data tables
        'rolodex': import_legacy.import_rolodex,
@@ -2146,7 +2184,7 @@ async def admin_panel(request: Request, db: Session = Depends(get_db)):
        Footers, FileStat, Employee, GroupLkup, FileType,
        Qdros, PlanInfo, Pensions, PensionMarriage, PensionDeath,
        PensionSchedule, PensionSeparate, PensionResults,
-       RolexV, FVarLkup, RVarLkup,
+       RolexV, FVarLkup, RVarLkup, States, Printers, Setup,
        # Modern tables
        Client, Phone, Case, Transaction, Payment, Document
    )
@@ -2162,6 +2200,9 @@ async def admin_panel(request: Request, db: Session = Depends(get_db)):
            'FileType': db.query(FileType).count(),
            'FVarLkup': db.query(FVarLkup).count(),
            'RVarLkup': db.query(RVarLkup).count(),
+           'States': db.query(States).count(),
+           'Printers': db.query(Printers).count(),
+           'Setup': db.query(Setup).count(),
        },
        'core': {
            'Rolodex': db.query(Rolodex).count(),
docs/UPLOAD_FIX.md (new file)
@@ -0,0 +1,103 @@
# Upload Detection Fix Summary

## Problem
Files uploaded to the admin panel were detected as "unknown" when they were named after model classes rather than with legacy CSV names.

## Solution Implemented

### 1. Enhanced Filename Detection
Updated `get_import_type_from_filename()` in `app/main.py` to recognize both naming schemes (see the sketch after this list):
- **Legacy CSV names**: `FILES.csv`, `LEDGER.csv`, `PAYMENTS.csv`
- **Model class names**: `LegacyFile.csv`, `Ledger.csv`, `LegacyPayment.csv`
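To make the matching rule concrete, a minimal self-contained sketch of the idea (illustrative only; the real matcher in `app/main.py` covers every table):

```python
from pathlib import Path

# Illustrative sketch of the matching idea; the real matcher checks the
# most specific patterns first across all tables.
def detect(filename: str) -> str:
    base = Path(filename).stem.upper()         # "LegacyFile.csv" -> "LEGACYFILE"
    if 'FILES_R' in base or 'FILESR' in base:  # specific before generic
        return 'files_r'
    if base == 'LEGACYFILE' or 'FILES' in base:
        return 'files'
    if 'STATES' in base:
        return 'states'
    raise ValueError(f"Unknown file type for filename: {filename}")

assert detect("FilesR.csv") == "files_r"       # model class name
assert detect("FILES.csv") == "files"          # legacy CSV name
assert detect("States.csv") == "states"        # newly supported table
```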
### 2. Added Support for Additional Tables
Added import functions and detection for three previously unsupported tables:
- **States** (`STATES.csv`) - US state abbreviations
- **Printers** (`PRINTERS.csv`) - printer configuration
- **Setup** (`SETUP.csv`) - application configuration

These are reference tables and should be imported early in the process.

## Filename Variations Now Supported

### Core Data Tables
| Model Class | Supported Filenames | Import Type |
|------------|---------------------|-------------|
| LegacyFile | `FILES.csv`, `FILE.csv`, `LegacyFile.csv` | `files` |
| FilesR | `FILES_R.csv`, `FILESR.csv`, `FilesR.csv` | `files_r` |
| FilesV | `FILES_V.csv`, `FILESV.csv`, `FilesV.csv` | `files_v` |
| FileNots | `FILENOTS.csv`, `FILE_NOTS.csv`, `FileNots.csv` | `filenots` |
| Ledger | `LEDGER.csv`, `Ledger.csv` | `ledger` |
| LegacyPayment | `PAYMENTS.csv`, `PAYMENT.csv`, `LegacyPayment.csv` | `payments` |
| LegacyPhone | `PHONE.csv`, `LegacyPhone.csv` | `phone` |

### New Reference Tables
| Model Class | Supported Filenames | Import Type |
|------------|---------------------|-------------|
| States | `STATES.csv`, `States.csv` | `states` |
| Printers | `PRINTERS.csv`, `Printers.csv` | `printers` |
| Setup | `SETUP.csv`, `Setup.csv` | `setup` |
## For Existing Unknown Files

If you have files already uploaded as `unknown_*.csv`, you have two options:

### Option 1: Re-upload with Correct Names
1. Delete the unknown files from the admin panel
2. Re-upload with any of the supported filename variations above
3. Files will now be auto-detected correctly

### Option 2: Use the Map Functionality
1. In the admin panel, find the "Unknown Data" section
2. Select the unknown files you want to map
3. Choose the target import type from the dropdown (e.g., `files`, `ledger`, `payments`)
4. Click "Map Selected" to rename them with the correct prefix
5. Import them using the import button

## Checking Unknown Files
To identify what type an unknown file might be, check its header row:

```bash
head -1 data-import/unknown_*.csv
```

Common headers:
- **LEDGER**: `File_No,Date,Item_No,Empl_Num,T_Code,T_Type,T_Type_L,Quantity,Rate,Amount,Billed,Note`
- **STATES**: `Abrev,St`
- **PRINTERS**: `Number,Name,Port,Page_Break,Setup_St,...`
- **SETUP**: `Appl_Title,L_Head1,L_Head2,L_Head3,...`
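If you have many unknown files, a small throwaway script (not part of the app) can match header rows against the signatures above; the `SIGNATURES` prefixes here are assumptions based on the headers just listed:

```python
import csv
from pathlib import Path

# Throwaway helper (not part of the app) that guesses an import type from a
# CSV header row. The LEDGER prefix deliberately excludes TRNSACTN, whose
# second column is "Id" (see the note below).
SIGNATURES = {
    ("Abrev", "St"): "states",
    ("File_No", "Date", "Item_No"): "ledger",
    ("Number", "Name", "Port"): "printers",
}

def guess_type(path: Path) -> str | None:
    with open(path, newline="") as fh:
        header = next(csv.reader(fh), [])
    for prefix, import_type in SIGNATURES.items():
        if tuple(header[:len(prefix)]) == prefix:
            return import_type
    return None

for p in sorted(Path("data-import").glob("unknown_*.csv")):
    print(p.name, "->", guess_type(p) or "still unknown")
```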
## Note on TRNSACTN Files
If you see unknown files with headers like:
```
File_No,Id,Footer_Code,Date,Item_No,Empl_Num,T_Code,T_Type,T_Type_L,Quantity,Rate,Amount,Billed,Note
```

these are **TRNSACTN** files (transaction join tables). TRNSACTN is a legacy reporting view that combines LEDGER with related tables. TRNSACTN import is currently not supported because it is a derived/joined view; the data should be imported via the individual tables (LEDGER, FILES, etc.) instead.

## Testing the Fix

1. Upload a file named `LegacyFile.csv` - it should be detected as `files`
2. Upload `Ledger.csv` - it should be detected as `ledger`
3. Upload `States.csv` - it should be detected as `states`
4. Check the admin panel to confirm files are grouped by their detected type (not "unknown")
5. Import as normal using the import buttons
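These checks can also be automated; a small pytest sketch, assuming the detector is importable from `app.main` (module path inferred from this document):

```python
import pytest

from app.main import get_import_type_from_filename  # path per this doc

@pytest.mark.parametrize("filename,expected", [
    ("LegacyFile.csv", "files"),
    ("Ledger.csv", "ledger"),
    ("States.csv", "states"),
])
def test_upload_detection(filename: str, expected: str) -> None:
    assert get_import_type_from_filename(filename) == expected
```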
## Changes Made

### Files Modified
- `app/main.py`:
  - Enhanced `get_import_type_from_filename()` with model class name detection
  - Added `states`, `printers`, `setup` to `VALID_IMPORT_TYPES`
  - Added the new tables to `IMPORT_ORDER`
  - Added the import functions to `process_csv_import()`
  - Updated `table_counts` in the admin panel to show the new tables

- `app/import_legacy.py`:
  - Added `import_states()`
  - Added `import_printers()`
  - Added `import_setup()`
  - Imported the States, Printers, and Setup models

No database schema changes were needed - all three models already existed.