Commit

Refactor radon complexity

torrua committed Apr 29, 2024
1 parent de23cd2 commit 8a736d6

Showing 2 changed files with 25 additions and 49 deletions.
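The commit title refers to radon, the Python code-metrics tool; the refactor below is presumably aimed at lowering the cyclomatic-complexity score it reports for get_data. As a minimal sketch (not part of the commit, assuming radon is installed), the score can be checked programmatically; the file path mirrors the one in this diff:

    # Hypothetical check: rank each block in functions.py by cyclomatic
    # complexity via radon's Python API.
    from radon.complexity import cc_visit, cc_rank

    with open("app/site/functions.py", encoding="utf-8") as f:
        source = f.read()

    for block in cc_visit(source):
        # Each block exposes its name and complexity score; cc_rank maps
        # the score to radon's A-F letter grade.
        print(block.name, block.complexity, cc_rank(block.complexity))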
10 changes: 5 additions & 5 deletions app/site/__init__.py
@@ -40,7 +40,7 @@ def redirect_columns():
 @site_blueprint.route("/")
 @site_blueprint.route("/home")
 def home():
-    article = get_data(MAIN_SITE).get("content").body.find("div", {"id": "content"})
+    article = get_data(MAIN_SITE).body.find("div", {"id": "content"})
     for bq in article.findAll("blockquote"):
         bq["class"] = "blockquote"

@@ -52,23 +52,23 @@ def home():

 @site_blueprint.route("/articles")
 def articles():
-    article_block = get_data(MAIN_SITE).get("content")
+    article_block = get_data(MAIN_SITE)
     title = article_block.find("a", {"name": "articles"}).find_parent("h2")
     content = title.find_next("ol")
     return render_template("articles.html", articles=content, title=title.get_text())


 @site_blueprint.route("/texts")
 def texts():
-    article_block = get_data(MAIN_SITE).get("content")
+    article_block = get_data(MAIN_SITE)
     title = article_block.find("a", {"name": "texts"}).find_parent("h2")
     content = title.find_next("ol")
     return render_template("articles.html", articles=content, title=title.get_text())


 @site_blueprint.route("/columns")
 def columns():
-    article_block = get_data(MAIN_SITE)["content"]
+    article_block = get_data(MAIN_SITE)
     title = article_block.find("a", {"name": "columns"}).find_parent("h2")
     content = title.find_next("ul")
     return render_template("articles.html", articles=content, title=title.get_text())

@@ -175,7 +175,7 @@ def search_log(word, event_id, is_case_sensitive, nothing):
 @site_blueprint.route("/<string:section>/<string:article>", methods=["GET"])
 def proxy(section: str = "", article: str = ""):
     url = f"{MAIN_SITE}{section}/{article}"
-    content = get_data(url).get("content").body
+    content = get_data(url).body

     for bq in content.findAll("blockquote"):
         bq["class"] = "blockquote"
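For orientation, a minimal sketch (not part of the commit) of the calling convention these routes now rely on: get_data returns the parsed BeautifulSoup document directly, or an error string on failure. The URL below is a placeholder:

    from app.site.functions import get_data

    data = get_data("https://example.com/")  # placeholder URL
    if isinstance(data, str):
        # Either the URL did not start with "http" or the request failed.
        print("download failed:", data)
    else:
        print(data.find("div", {"id": "content"}))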
64 changes: 20 additions & 44 deletions app/site/functions.py
@@ -1,71 +1,47 @@
 from __future__ import annotations

 import re
-import urllib

+from urllib.request import Request, urlopen
+from urllib.error import URLError, HTTPError
 from bs4 import BeautifulSoup

 from app.logger import log


-def get_data(
-    url: str, parser: str = "lxml", headers: dict = None
-) -> dict[str, bool | str | BeautifulSoup]:
+def get_data(url: str) -> str | BeautifulSoup:
     """
     This function downloads and parses the content of a URL
     :url: address of the needed site or directory
-    :return: dict with the elements:
-        > :"result": *bool* with the result of the downloading process
-        > :"content": *BeautifulSoup* with elements if the result is True
-            OR
+    :return: *BeautifulSoup* OR
         *str* with an error message if the result is False
     """
-    cntnt, rslt, msg = "content", "result", "message"
     pattern_http = "^http"
     m_l = {
         "start": "Starting to download data from the site",
         "error": "Failed to retrieve data:\n\t>> Address:\t%s\n\t>> Error:\t%s",
         "get_site": "Trying to download data from the resource",
         "url_check": "Checking whether the input is a web page address",
         "url_correct": "A valid web page address was entered:\t%s",
-        "path_check": "Checking whether the input is a file path\n\t>> Address:\t%s",
         "parse": "Trying to parse the downloaded data",
-        "agent": "Contents of the headers string:\n\t>>\t%s",
         "success": "Data downloaded from the site successfully",
     }

     log.info(m_l["start"])
     log.debug(m_l["url_check"])

-    if re.match(pattern_http, url):
-        log.debug(m_l["url_correct"], url)
-        try:
-            log.debug(m_l["get_site"])
-            if url.lower().startswith("http"):
-                request_to_site = urllib.request.Request(
-                    url=url, headers=headers if headers else {}
-                )
-            else:
-                raise ValueError from None
-            with urllib.request.urlopen(request_to_site) as response:
-                try:
-                    log.debug(m_l["parse"])
-                    site_data = BeautifulSoup(response, parser)
-                except urllib.error.HTTPError as err:
-                    log.error(m_l["error"], *(url, err))
-                    return {rslt: False, cntnt: str(err), msg: 5152}
-        except urllib.error.URLError as err:
-            log.error(m_l["error"], url, err)
-            log.error(m_l["agent"], headers)
-            return {rslt: False, cntnt: str(err), msg: 5152}
-    else:
-        log.debug(m_l["path_check"], url)
-        try:
-            log.debug(m_l["get_site"])
-            site_data = BeautifulSoup(open(url), parser)
-        except (FileNotFoundError, UnicodeDecodeError) as err:
-            log.error(m_l["error"], *(url, err))
-            return {rslt: False, cntnt: str(err), msg: 5152}
-
-    log.info(m_l["success"])
-    return {rslt: True, cntnt: site_data, msg: None}
+    if not re.match(pattern_http, url):
+        return ""
+
+    log.debug(m_l["url_correct"], url)
+    log.debug(m_l["get_site"])
+
+    try:
+        with urlopen(Request(url=url)) as response:
+            log.debug(m_l["parse"])
+            soup = BeautifulSoup(response, "lxml")
+            log.info(m_l["success"])
+            return soup
+    except (URLError, HTTPError) as err:
+        log.error(m_l["error"], url, err)
+        return str(err)
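A hypothetical smoke test (not part of the commit) pinning down the new early return for non-HTTP input:

    from app.site.functions import get_data

    def test_non_http_input_returns_empty_string():
        # Inputs that do not match the "^http" pattern now short-circuit
        # to an empty string instead of being opened as local files.
        assert get_data("ftp://example.com") == ""
        assert get_data("/some/local/path.html") == ""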
