Refactored some code, downgraded the logging level to debug almost everywhere, and fixed minor bugs

master
myumyu 3 years ago
parent d29c673e7d
commit 2630492c84
  1. 24
      evaluator.py
  2. 44
      main.py

@ -7,29 +7,21 @@ from config import config
class Evaluator:
    """Matches text against the configured blacklists: plain entries and urls.

    The diff view duplicated old/new lines; this is the reconstructed
    post-commit class with the redundant old statements removed.
    """

    def __init__(self):
        # One alternation pattern over all blacklist entries, case-insensitive.
        self.blacklist_re = re.compile('|'.join(config.BLACKLIST), re.IGNORECASE)
        # "inverted" so anything not whitelisted will match it
        self.url_blacklist_re = re.compile(f'(?!{"|".join(config.URL_WHITELIST)})',
                                           re.IGNORECASE)
        logging.debug(f'Compiled, url blacklist regex: {self.url_blacklist_re}\n blacklist regex: {self.blacklist_re}')
        # Single underscore: private by convention, no need for name mangling.
        self._extractor = URLExtract(extract_localhost=False)

    def evaluate(self, text):
        """Return (trigger_urls, trigger_entries) found in *text*.

        Both lists are empty when *text* is falsy (None or empty string).
        """
        if not text:
            return [], []
        # Any extracted url NOT matching the whitelist lookahead is a trigger.
        trigger_urls = [*filter(self.url_blacklist_re.match,
                                self._extractor.find_urls(text, only_unique=True))]
        # finditer + group(0) yields whole matches even if the compiled
        # pattern contains groups (re.findall would return the groups instead).
        trigger_entries = [entry.group(0) for entry in re.finditer(self.blacklist_re, text)]
        logging.debug(f'Evaluated: {text}\n{trigger_urls}\n{trigger_entries}')
        return trigger_urls, trigger_entries

@ -10,19 +10,20 @@ from config import config
from evaluator import Evaluator
def get_path(post):
    """Build an imageboard cross-link for *post*, e.g. '>>>/b/12 (34)'.

    Uses the thread id when present, otherwise the post id (the post is
    itself an OP); the post id always follows in parentheses.
    """
    return f'>>>/{post["board"]}/{post["thread"] or post["postId"]} ({post["postId"]})'


# Backward-compatible alias for callers still using the pre-rename name.
build_post_path = get_path
def send_notification(title, body):
    """Show a user notification with *title* and *body*.

    Dispatches to `termux-notification` when running under Termux
    (config.USE_TERMUX_API), otherwise to desktop `notify-send`.
    """
    logging.debug(f'Sending notification: {title} {body}')
    # List-form argv (shell=False) — safe even if title/body contain shell metacharacters.
    subprocess.call(['termux-notification', '--title', title, '--content', body]
                    if config.USE_TERMUX_API
                    else ['notify-send', title, body])
def watch_live_posts(evaluate, notify):
def get_auth_cookie():
logging.info("Requesting new cookie to watch live posts")
logging.debug('Requesting new cookie to watch live posts')
return requests.post(
url=f'https://{config.IB_DOMAIN_NAME}/forms/login',
data={'username': config.GLOBAL_MOD_USERNAME, 'password': config.GLOBAL_MOD_PASSWORD},
@ -35,24 +36,26 @@ def watch_live_posts(evaluate, notify):
def on_new_post(post):
    """Evaluate a freshly received post and notify when anything matched.

    *evaluate* and *notify* come from the enclosing watch_live_posts scope.
    """
    urls, entries = evaluate(post["nomarkup"])
    if urls or entries:
        # Join ALL triggers with newlines. The previous
        # '\n'.join(urls) + '\n'.join(entries) dropped the separator
        # between the last url and the first entry.
        notify(f'Alert! {get_path(post)}', '\n'.join(urls + entries))
while True:
    try:
        # Authenticate on every (re)connect — the cookie may have expired.
        client.connect(f'wss://{config.IB_DOMAIN_NAME}/', headers={'Cookie': get_auth_cookie()})
        client.emit('room', 'globalmanage-recent-hashed')
    except Exception as e:
        logging.error(f'Exception in live posts watcher: {e}')
        notify(f'Lost live posts connection', f'Retrying in {config.LIVE_POSTS_RETRY_TIMEOUT} seconds')
        time.sleep(config.LIVE_POSTS_RETRY_TIMEOUT)  # waits for a bit, maybe will fix itself
        # NOTE(review): without this `continue` a failed connect fell through
        # to the "Connected" notification and client.wait() below.
        continue
    notify(f'Connected', f'Watching live posts')
    client.wait()  # blocks the thread until something happens
def watch_reports(notify):
def get_auth_session():
logging.info("Starting new authenticated session to fetch reports")
logging.debug('Starting new authenticated session to fetch reports')
s = requests.Session()
s.post(
url=f'https://{config.IB_DOMAIN_NAME}/forms/login',
@ -66,26 +69,27 @@ def watch_reports(notify):
previous = 0
while True:
    time.sleep(config.FETCH_REPORTS_INTERVAL)
    reply = session.get(f'https://{config.IB_DOMAIN_NAME}/globalmanage/reports.json')
    if reply.status_code != 200:
        logging.error(f'Error while fetching reports: {reply.status_code}')
        notify(f'Error while fetching reports', f'Retrying in {config.FETCH_REPORTS_INTERVAL} seconds')
        # Presumably the session expired — re-authenticate and retry.
        session = get_auth_session()
        continue
    reported_posts = reply.json()["reports"]
    current = len(reported_posts)  # number of posts reported (not the number of reports)
    # Notify whenever the non-zero count changed since the last poll.
    if 0 < current != previous:
        notify(f'New reports!',
               "\n".join([f'{get_path(p)} {[r["reason"] for r in p["globalreports"]]}'
                          for p in reported_posts]))
    previous = current
def main():
logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
# launches live post watcher
# launches live posts watcher
live_posts_watcher = Thread(target=watch_live_posts, args=(Evaluator().evaluate, send_notification,))
live_posts_watcher.daemon = True
live_posts_watcher.start()
@ -99,5 +103,5 @@ def main():
reports_watcher.join()
if __name__ == "__main__":
if __name__ == '__main__':
main()

Loading…
Cancel
Save