Skip to content

Commit cc2187d

Browse files
committed
Org membership importer
1 parent 526c0e1 commit cc2187d

20 files changed

+4227
-551
lines changed

astra_app/core/admin.py

Lines changed: 319 additions & 189 deletions
Large diffs are not rendered by default.
Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
from collections.abc import Mapping, Sequence
2+
from typing import Any
3+
4+
from django.core.paginator import Paginator
5+
6+
7+
def build_import_preview_context(
    *,
    valid_rows: Sequence[Any],
    request_get: Mapping[str, Any],
    instance_decision_attr: str,
    per_page_default: int = 50,
    per_page_min: int = 50,
) -> dict[str, Any]:
    """Partition import preview rows into "to import" / "skipped" and paginate both.

    Pagination state is read from *request_get* (``per_page``,
    ``matches_page``, ``skipped_page``); ``per_page`` is clamped to at least
    *per_page_min*.

    Side effect: every element of *valid_rows* is tagged in place with an
    ``astra_row_number`` attribute (a 1-based row index for the template).
    """

    def decide_match(row: Any) -> bool:
        # An explicit import_type wins; otherwise fall back to the decision
        # attribute on the row's instance; default is "skip" (not a match).
        kind = str(getattr(row, "import_type", "") or "")
        if kind:
            return kind != "skip"
        inst = row.instance if hasattr(row, "instance") else None
        if inst is not None and hasattr(inst, instance_decision_attr):
            return getattr(inst, instance_decision_attr) == "IMPORT"
        return False

    def decide_row_number(row: Any, fallback: int) -> int:
        # Prefer the row's own number, then nested row/original numbers,
        # then the enumeration index; coerce defensively to int.
        candidate = getattr(row, "number", None)
        if candidate is None:
            candidate = getattr(getattr(row, "row", None), "number", None)
        if candidate is None:
            candidate = getattr(getattr(row, "original", None), "number", None)
        if candidate is None:
            candidate = fallback
        try:
            return int(candidate)
        except (TypeError, ValueError):
            return fallback

    to_import: list[Any] = []
    to_skip: list[Any] = []
    import_numbers: list[int] = []

    for position, row in enumerate(valid_rows, start=1):
        number = decide_row_number(row, position)
        row.astra_row_number = number
        if decide_match(row):
            to_import.append(row)
            import_numbers.append(number)
        else:
            to_skip.append(row)

    try:
        per_page = int(str(request_get.get("per_page", str(per_page_default))))
    except ValueError:
        per_page = per_page_default
    per_page = max(per_page, per_page_min)

    return {
        "matches_page_obj": Paginator(to_import, per_page).get_page(
            request_get.get("matches_page") or "1"
        ),
        "skipped_page_obj": Paginator(to_skip, per_page).get_page(
            request_get.get("skipped_page") or "1"
        ),
        "match_row_numbers": sorted(set(import_numbers)),
        "preview_summary": {
            "total": len(valid_rows),
            "to_import": len(to_import),
            "skipped": len(to_skip),
        },
    }

astra_app/core/csv_import_utils.py

Lines changed: 82 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,94 @@
11
import csv
22
import datetime
33
import io
4+
import secrets
5+
from collections.abc import Mapping, Sequence
6+
from typing import Any
47

8+
from dateutil import parser
9+
from django import forms
10+
from django.core.cache import cache
511
from django.core.files.uploadedfile import UploadedFile
12+
from django.urls import reverse
13+
from tablib import Dataset
614

715
from core.views_utils import _normalize_str
816

17+
# Sentinel choice meaning "let the importer pick the column automatically".
AUTO_DETECT_CHOICE: tuple[str, str] = ("", "Auto-detect")


def build_csv_header_choices(headers: Sequence[str]) -> list[tuple[str, str]]:
    """Return form choice pairs for *headers*, prefixed with the auto-detect option."""
    choices: list[tuple[str, str]] = [AUTO_DETECT_CHOICE]
    choices.extend((header, header) for header in headers)
    return choices
22+
23+
24+
def set_form_column_field_choices(
    *,
    form: forms.Form,
    field_names: Sequence[str],
    headers: Sequence[str],
) -> None:
    """Assign CSV-header choices to each named choice field present on *form*.

    Field names not present on the form are silently ignored, so callers may
    pass a superset of the column-mapping fields.
    """
    header_choices = build_csv_header_choices(headers)
    for name in (n for n in field_names if n in form.fields):
        form.fields[name].choices = header_choices
34+
935

1036
def norm_csv_header(value: str) -> str:
    """Canonicalize a CSV header for fuzzy matching: lowercase, alphanumerics only."""
    return "".join(filter(str.isalnum, value.strip().lower()))
1238

1339

40+
def resolve_column_header(
    field_name: str,
    headers: Sequence[str],
    header_by_norm: Mapping[str, str],
    column_overrides: Mapping[str, str],
    *fallback_norms: str,
) -> str | None:
    """Resolve which CSV header should feed *field_name*.

    A user-supplied override (from *column_overrides*) wins: matched exactly
    against *headers* first, then by normalized form via *header_by_norm*.
    An override that matches nothing is an error. Without an override, the
    first *fallback_norms* candidate whose normalized form appears in
    *header_by_norm* is returned.

    Returns the resolved header, or ``None`` when nothing matched.
    Raises ``ValueError`` when an explicit override cannot be resolved.
    """
    chosen = _normalize_str(column_overrides.get(field_name, ""))
    if chosen:
        if chosen in headers:
            return chosen
        fuzzy = header_by_norm.get(norm_csv_header(chosen))
        if fuzzy:
            return fuzzy
        raise ValueError(f"Column '{chosen}' not found in CSV headers")

    # No override: first fallback whose normalized form maps to a real header.
    resolved_fallbacks = (
        header_by_norm.get(norm_csv_header(fallback)) for fallback in fallback_norms
    )
    return next((header for header in resolved_fallbacks if header), None)
65+
66+
67+
def attach_unmatched_csv_to_result(
    result: Any,
    dataset: Dataset,
    cache_key_prefix: str,
    reverse_url_name: str,
) -> None:
    """Cache *dataset* as CSV for one hour and attach a download link to *result*.

    A random URL-safe token keys the cached CSV; the view named by
    *reverse_url_name* receives the token as a URL kwarg.

    `Result` is a third-party import-export type with no extension hook;
    dynamic attributes are used as a lightweight duck-typed contract.
    """
    token = secrets.token_urlsafe(16)
    exported_csv = dataset.export("csv")
    cache.set(f"{cache_key_prefix}:{token}", exported_csv, timeout=60 * 60)

    download_url = reverse(reverse_url_name, kwargs={"token": token})
    result.unmatched_csv_content = exported_csv
    result.unmatched_download_url = download_url
83+
84+
85+
def sanitize_csv_cell(value: str) -> str:
    """Prefix formula-starting characters to prevent spreadsheet formula injection."""
    # OWASP CSV-injection guidance: =, +, -, @ (plus tab/CR) can start a
    # formula when the export is opened in a spreadsheet.
    if value.startswith(("=", "+", "-", "@", "\t", "\r")):
        return f"'{value}"
    return value
90+
91+
1492
def normalize_csv_email(value: object) -> str:
    """Normalize an email cell: stringify/trim via ``_normalize_str``, then lowercase."""
    normalized = _normalize_str(value)
    return normalized.lower()
1694

@@ -27,24 +105,17 @@ def parse_csv_bool(value: object) -> bool:
27105
return normalized in {"1", "y", "yes", "true", "t", "active", "activemember", "active member"}
28106

29107

30-
def parse_csv_date(value: object) -> datetime.date | None:
    """Parse a CSV cell into a date, accepting a wide range of formats.

    Delegates to dateutil's parser with month-first disambiguation
    (``dayfirst=False``). Returns ``None`` for blank or unparseable input.

    NOTE(review): dateutil fills missing components from the current date
    (e.g. "March 2024" picks up today's day-of-month) — confirm inputs
    always carry a complete date.
    """
    text = _normalize_str(value)
    if not text:
        return None

    try:
        # ParserError is a ValueError subclass; listed explicitly for clarity.
        moment = parser.parse(text, dayfirst=False, yearfirst=False)
    except (parser.ParserError, TypeError, ValueError, OverflowError):
        return None

    return moment.date()
48119

49120

50121
def extract_csv_headers_from_uploaded_file(uploaded: UploadedFile) -> list[str]:

0 commit comments

Comments
 (0)