From 76193a2e8473bbc1d43899359693f391e7f1fa96 Mon Sep 17 00:00:00 2001
From: MMaker
Date: Fri, 31 Jan 2025 14:32:19 -0500
Subject: [PATCH] Add logging

---
 app.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/app.py b/app.py
index bbae589..15ed3bb 100644
--- a/app.py
+++ b/app.py
@@ -4,6 +4,10 @@ import requests
 from bs4 import BeautifulSoup
 from flask import Flask, Response
 from diskcache import Cache
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 app = Flask(__name__)
 
@@ -15,7 +19,7 @@ cookie_jar = http.cookiejar.MozillaCookieJar('cookies.txt')
 try:
     cookie_jar.load(ignore_discard=True, ignore_expires=True)
 except FileNotFoundError:
-    print("cookies.txt not found, starting with empty cookie jar")
+    logger.warning("cookies.txt not found, starting with empty cookie jar")
 
 s = requests.Session()
 s.headers.update({
@@ -25,16 +29,21 @@ s.cookies = cookie_jar  # type: ignore
 
 @app.route("/watch/<video_id>")
 def proxy(video_id):
+    logger.info(f"Received request for video ID: {video_id}")
+
     cached_html = cache.get(video_id)
     if cached_html is not None:
+        logger.info(f"Using cached response for video ID: {video_id}")
         return Response(cached_html, mimetype="text/html")  # type: ignore
 
     # Not in cache or cache expired; fetch from nicovideo.jp
     real_url = f"https://www.nicovideo.jp/watch/{video_id}"
     try:
+        logger.info(f"Fetching content from URL: {real_url}")
         r = s.get(real_url, timeout=10)
     except requests.RequestException as e:
-        return Response(f"Error fetching the page: {e}", status=500)
+        logger.error(f"Error fetching the page for video ID '{video_id}': {e}")
+        return Response(status=500)
 
     soup = BeautifulSoup(r.text, "html.parser")
     thumbnail_url = None
@@ -49,7 +58,8 @@ def proxy(video_id):
             params["video"]["thumbnail"].get("middleUrl")
             or params["video"]["thumbnail"].get("url")
         )
-    except (KeyError, json.JSONDecodeError):
+    except (KeyError, json.JSONDecodeError) as e:
+        logger.warning(f"Failed to extract thumbnail info for video ID '{video_id}': {e}")
         pass
 
     og_tags = soup.find_all("meta", property=lambda x: x)  # type: ignore