refactor(pages): unify code for page import
parent 77a984170c
commit 28d905a24c
@@ -1,16 +1,26 @@
 from pathlib import Path
 
-from django.contrib import admin
+from django.contrib import admin, messages
 
 from .models import Page
+from .parser import import_pages
 
 
 def reimport(modeladmin, request, queryset):
-    for page in queryset:
-        path = Path(__file__).resolve().parent / "default_content" / f"{page.url}.html"
-        if path.exists():
-            page.content = path.read_text()
-            page.save()
+    num_pages = len(list(import_pages(True, set(page.url for page in queryset))))
+
+    if num_pages == 0:
+        modeladmin.message_user(
+            request, "Es wurden keine Seiten reimportiert.", messages.WARNING
+        )
+    elif num_pages == 1:
+        modeladmin.message_user(
+            request, "Eine Seite wurde reimportiert.", messages.SUCCESS
+        )
+    else:
+        modeladmin.message_user(
+            request, f"{num_pages} Seiten wurden reimportiert.", messages.SUCCESS
+        )
 
 
 @admin.register(Page)
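The admin hunk is cut off right after @admin.register(Page), so the ModelAdmin itself is not part of the excerpt. A rough, hypothetical sketch of how the new action is typically exposed (class name and field list are assumptions, not taken from this commit):

# Hypothetical continuation of the truncated admin.py hunk.
# `reimport` is the action function defined above in the same module;
# PageAdmin and list_display are guesses for illustration only.
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
    list_display = ("url", "title", "visible")  # assumed columns
    actions = [reimport]  # exposes the bulk re-import action on the Page changelist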
@@ -1,9 +1,6 @@
-from pathlib import Path
-from django.core.management.base import BaseCommand
-
-from bs4 import BeautifulSoup
+from django.core.management.base import BaseCommand, CommandError
 
-from ...models import Page
+from ...parser import import_pages
 
 
 class Command(BaseCommand):
@@ -16,30 +13,9 @@ class Command(BaseCommand):
         parser.add_argument("pages", help="Specify which pages to import", nargs="*")
 
     def handle(self, *args, **options):
-        content_path = Path(__file__).resolve().parent.parent.parent / "default_content"
-        for file in content_path.iterdir():
-            if (pages := options["pages"]) and file.stem not in pages:
-                continue
+        pages = import_pages(options["force"], options["pages"])
 
-            slug = file.stem
-            p, created = Page.objects.get_or_create(url=slug)
-            if (not created) and (not options["force"]):
-                continue
-
-            soup = BeautifulSoup(file.read_text(), "html.parser")
-
-            if soup.title:
-                p.title = soup.title.string
-                soup.title.decompose()
-            else:
-                p.title = slug.title()
-
-            if visible := soup.find("meta", attrs={"name": "visible"}):
-                p.visible = "content" not in visible.attrs or visible.attrs[
-                    "content"
-                ].lower() in ("1", "true", "yes")
-                visible.decompose()
-
-            p.content = str(soup).strip()
-            p.save()
-            print(f'created new page "{p.title}" for slug {slug}')
+        for p in pages:
+            self.stderr.write(
+                self.style.SUCCESS(f'created new page "{p.title}" for slug {p.url}')
+            )
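handle() now reads options["force"], but the hunk above only shows the "pages" argument, so a force flag has to be defined elsewhere in add_arguments(). A plausible sketch of that unseen piece (flag spelling and help text are assumptions; only the existence of options["force"] is confirmed by the diff):

        # Assumed to sit in add_arguments() next to the "pages" argument shown above;
        # not part of this commit's visible hunks.
        parser.add_argument(
            "--force",
            action="store_true",
            help="Re-import pages that already exist",
        )

The command would then be invoked roughly as ./manage.py <command name> --force <slug>..., where the command's name is not visible in this diff.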
@@ -0,0 +1,37 @@
+from pathlib import Path
+
+from bs4 import BeautifulSoup
+
+from .models import Page
+
+content_path = Path(__file__).resolve().parent / "default_content"
+
+
+def import_pages(force, pages):
+    for file in content_path.iterdir():
+        if pages and file.stem not in pages:
+            continue
+
+        slug = file.stem
+        p, created = Page.objects.get_or_create(url=slug)
+        if not created and not force:
+            continue
+
+        soup = BeautifulSoup(file.read_text(), "html.parser")
+
+        if soup.title:
+            p.title = soup.title.string
+            soup.title.decompose()
+        else:
+            p.title = slug.title()
+
+        if visible := soup.find("meta", attrs={"name": "visible"}):
+            p.visible = "content" not in visible.attrs or visible.attrs[
+                "content"
+            ].lower() in ("1", "true", "yes")
+            visible.decompose()
+
+        p.content = str(soup).strip()
+        p.save()
+
+        yield p
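Because import_pages() is a generator, nothing is imported until it is consumed; that is why the admin action above wraps the call in len(list(...)) and the management command iterates over the result. A minimal usage sketch, assuming the app is importable as "pages" and using made-up slugs:

from pages.parser import import_pages  # the app label "pages" is an assumption

# Consuming the generator performs the import; each yielded object is a saved Page.
imported = list(import_pages(force=True, pages={"imprint", "contact"}))
print(f"re-imported {len(imported)} page(s)")

# A falsy `pages` argument (e.g. an empty list) imports every file in
# default_content, skipping pages that already exist unless force is set.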