Coverage for an_website/soundboard/soundboard.py: 95.349%
86 statements
« prev ^ index » next — coverage.py v7.6.4, created at 2024-11-16 19:56 +0000
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
14"""The soundboard of the website."""
from __future__ import annotations

from collections.abc import Callable, Iterable
from functools import cache, lru_cache
from typing import ClassVar

from tornado.web import HTTPError

from ..utils.request_handler import HTMLRequestHandler
from .data import (
    ALL_SOUNDS,
    MAIN_PAGE_INFO,
    PERSON_SHORTS,
    PERSON_SOUNDS,
    HeaderInfo,
    Info,
    Person,
    SoundInfo,
)
# Bounded cache: ``path`` comes straight from the request URL, so an
# unbounded ``@cache`` would grow with every distinct (including invalid)
# path an attacker requests. The real key space is tiny (the person
# shorts plus the main feed), so a small bound loses nothing.
@lru_cache(maxsize=1024)
def get_rss_str(path: None | str, protocol_and_host: str) -> None | str:
    """Return the RSS string for the given path.

    ``path`` may be ``None``, ``""`` or ``"/"`` for the feed of all
    sounds, or a person short for that person's feed.  ``None`` is
    returned for any path that names no known feed.
    """
    if path is not None:
        path = path.lower()
    if path in {None, "/", ""}:
        sounds: Iterable[SoundInfo] = ALL_SOUNDS
    elif path in PERSON_SOUNDS:
        sounds = PERSON_SOUNDS[path]
    else:
        return None
    return "\n".join(
        sound_info.to_rss(protocol_and_host) for sound_info in sounds
    )
async def search_main_page_info(
    check_func: Callable[[SoundInfo], bool],
    info_list: Iterable[Info] = MAIN_PAGE_INFO,
) -> list[Info]:
    """Get an info list based on the query and the check_func and return it."""
    result: list[Info] = []
    for entry in info_list:
        if isinstance(entry, SoundInfo):
            if check_func(entry):
                result.append(entry)
            continue
        if not isinstance(entry, HeaderInfo):
            continue
        # A header arriving directly after other headers means those had
        # no matching sounds under them, so drop them.  A new "h1" clears
        # any trailing header; otherwise only headers of the same level
        # are cleared.  (If "h3" ever gets used this is insufficient:
        # "h2" would then also have to clear preceding "h3" headers.)
        while (  # pylint: disable=while-used
            result
            and isinstance(previous := result[-1], HeaderInfo)
            and entry.tag in ("h1", previous.tag)
        ):
            result.pop()
        result.append(entry)
    # Headers left dangling at the very end have no sounds either.
    while result and isinstance(result[-1], HeaderInfo):  # pylint: disable=while-used
        result.pop()
    return result
class SoundboardHTMLHandler(HTMLRequestHandler):
    """The request handler for the HTML pages."""

    async def get(self, path: str = "/", *, head: bool = False) -> None:
        """Handle GET requests and generate the page content.

        Raises an ``HTTPError`` 404 when the path names no known page.
        """
        if path is not None:
            path = path.lower()
        parsed_info = await self.parse_path(path)
        if parsed_info is None:
            raise HTTPError(404, reason="Page not found")

        if head:
            # HEAD requests get headers/status only, no body.
            return

        self.update_title_and_desc(path)
        await self.render(
            "pages/soundboard.html",
            sound_info_list=parsed_info[0],
            query=parsed_info[1],
            feed_url=self.fix_url(
                (
                    f"/soundboard/{path.strip('/')}/feed"
                    # The persons overview has no feed of its own, so both
                    # of its aliases link the main feed.  (Previously only
                    # "personen" was special-cased, so "persons" linked a
                    # feed URL that 404s.)
                    # NOTE(review): "search"/"suche" still link to
                    # /soundboard/suche/feed, which also 404s — confirm
                    # whether that is intended.
                    if path
                    and path != "/"
                    and path not in {"persons", "personen"}
                    else "/soundboard/feed"
                ),
            ),
        )

    async def parse_path(
        self, path: None | str
    ) -> None | tuple[Iterable[Info], None | str]:
        """Get an info list based on the path and return it with the query.

        Returns ``None`` for unknown paths; otherwise a tuple of the
        infos to display and the search query (``None`` when the page
        is not a search results page).
        """
        if path in {None, "", "index", "/"}:
            return MAIN_PAGE_INFO, None

        if path in {"persons", "personen"}:
            # One header per person, followed by that person's sounds.
            persons_list: list[Info] = []
            for key, person_sounds in PERSON_SOUNDS.items():
                persons_list.append(HeaderInfo(Person[key].value, type=Person))
                persons_list += person_sounds
            return persons_list, None

        if path in {"search", "suche"}:
            query = self.get_argument("q", "")
            if not query:
                # Empty query: show the full main page instead of nothing.
                return MAIN_PAGE_INFO, query
            return (
                await search_main_page_info(lambda info: info.contains(query)),
                query,
            )

        if path in PERSON_SHORTS:
            person = Person[path]
            return (
                await search_main_page_info(lambda info: info.person == person),
                None,
            )

        return None

    def update_title_and_desc(self, path: str) -> None:
        """Update the title and description of the page.

        Only person pages get a custom title/description; every other
        path keeps the defaults.
        """
        if path not in PERSON_SHORTS:
            return
        name = Person[path].value
        # Build the dative form ("von dem/der ...") and strip the
        # article for the page title.
        if name.startswith(("Das ", "Der ")):
            von_name = f"dem{name[3:]}"
            no_article_name = name[4:]
        elif name.startswith("Die "):
            von_name = f"der{name[3:]}"
            no_article_name = name[4:]
        else:
            von_name = name
            no_article_name = name

        self.short_title = f"Soundboard ({path.upper()})"
        self.title = f"{no_article_name.replace(' ', '-')}-Soundboard"
        self.description = (
            "Ein Soundboard mit coolen Sprüchen und Sounds von "
            f"{von_name} aus den Känguru-Chroniken"
        )
class SoundboardRSSHandler(SoundboardHTMLHandler):
    """The request handler for the RSS feeds."""

    POSSIBLE_CONTENT_TYPES: ClassVar[tuple[str, ...]] = (
        "application/rss+xml",
        "application/xml",
    )

    async def get(self, path: str = "/", *, head: bool = False) -> None:
        """Handle GET requests and generate the feed content."""
        origin = f"{self.request.protocol}://{self.request.host}"
        rss_str = get_rss_str(path, origin)

        if rss_str is None:
            # Unknown feed path: render the "not found" feed body.
            self.set_status(404, reason="Feed not found")
            return await self.render(
                "rss/soundboard.xml", found=False, rss_str=""
            )

        if head:
            return None
        self.update_title_and_desc(path)
        return await self.render(
            "rss/soundboard.xml",
            found=True,
            rss_str=rss_str,
        )