From ce3e8f907a5d917c30c810fc40dbcac4215bb430 Mon Sep 17 00:00:00 2001
From: Tom
Date: Sun, 17 Oct 2021 17:59:35 -0700
Subject: [PATCH] updated trisquel

---
 README.md            |  4 +--
 data.json            |  5 +--
 main.py              | 35 +--------------------
 projects/trisquel.py | 72 +++++++++++++++++++++++++++++++++-----------
 test.py              |  4 +--
 5 files changed, 63 insertions(+), 57 deletions(-)

diff --git a/README.md b/README.md
index b53fdca..52dc4e0 100644
--- a/README.md
+++ b/README.md
@@ -70,13 +70,13 @@ puppylinux: https://distro.ibiblio.org/puppylinux/ check the ISO files or htm fi
 qtproject: https://download.qt.io/
 racket: https://mirror.racket-lang.org/installers/ no public repo, no timestamp, no mirror status tracker make sure that we have the latest version number under racket-installers
 raspberry pi: https://archive.raspberrypi.org/ Checking the timestamp of either the Release file or the Packages file should suffice.
-raspbian: http://archive.raspbian.org/raspbian/ snapshotindex.txt is most likely a timestamp, tho i'm not sure. also i think our mirror is completely outdated, it's not listed on official mirror list
+raspbian: http://archive.raspbian.org/ snapshotindex.txt is most likely a timestamp, tho i'm not sure. also i think our mirror is completely outdated, it's not listed on official mirror list
 sagemath: same source tarballs as them (the sage-*.tar.gz files under 'Source Code')
 salt stack: checking the "Latest release" text under the 'About' header
 scientific: https://scientificlinux.org/downloads/sl-mirrors/ not checking this one since it's abandoned
 slackware: https://mirrors.slackware.com/slackware/ check whether we have each release and whether the timestamp for CHECKSUMS.md5 in each release is the same, for slackware-iso, just make sure that our list of directories is the same
 tdf: https://download.documentfoundation.org/
-trisquel: https://trisquel.info/mirmon/index.html out of date website!? please recheck this!!!
+trisquel: http://archive.trisquel.info/trisquel/ https://trisquel.info/mirmon/index.html out of date website!? please recheck this!!!
 ubuntu: https://launchpad.net/ubuntu/+mirror/mirror.csclub.uwaterloo.ca-archive
 ubuntu-ports: http://ports.ubuntu.com/ubuntu-ports/ checking the Release files in dists
 ubuntu-ports-releases: https://cdimage.ubuntu.com/releases/ has public repo, no timestamp, no status tracker, brute force looped it
diff --git a/data.json b/data.json
index bd2ea6a..5d10254 100644
--- a/data.json
+++ b/data.json
@@ -273,8 +273,9 @@
     "trisquel": {
         "out_of_sync_since": null,
         "out_of_sync_interval": 86400,
-        "csc": "",
-        "upstream": "https://trisquel.info/mirmon/index.html",
+        "csc": "trisquel/",
+        "upstream": "http://rsync.trisquel.info/trisquel/dists/",
+        "mirrors": ["https://mirror.fsf.org/trisquel-images/", "http://mirrors.ocf.berkeley.edu/trisquel-images/"],
         "file": ""
     },
     "ubuntu_ports": {
diff --git a/main.py b/main.py
index 3031e47..10dbc56 100644
--- a/main.py
+++ b/main.py
@@ -9,41 +9,8 @@ import sys
 import requests
 from projects import * # noqa
-# from dateparser.search import search_dates # this library seems to be super slow but the other library: dateutil.parser gets some errors
-# http://theautomatic.net/2018/12/18/2-packages-for-extracting-dates-from-a-string-of-text-in-python/
 import re # import regular expressions to remove stray numbers in string that might interfere with date finding
 import json # import json to read project info stored in json file
-import datefinder # another date finding library
-
-
-# checker: gets the timestamp of the file inside the directory at the specified URL and returns it as a string
-def checker(directory_URL, file_name):
-    page = requests.get(directory_URL).text
-    file_index = page.find(file_name)
-    # print(page)
-
-    # remove stray numbers (file size numbers in particular) that might interfere with date finding
-    segment_clean = re.sub(r'\s\d+\s', ' ', page[file_index:]) # removes numbers for size
-    segment_clean = re.sub(r'\s\d+\w*\s', ' ', page[file_index:]) # removes numbers + size unit. e.x. 50kb
-    # print(segment_clean)
-
-    # implementation using dateparser.search.search_dates
-    # notes: some dates don't parse correctly with this tool
-    # print(search_dates(page[file_index:], languages=['en']))
-    # print(search_dates(page[file_index:])[0])
-
-    # finds the dates in the segment after the file name
-    # notes: a generator will be returned by the datefinder module. I'm typecasting it to a list. Please read the note of caution provided at the bottom.
-    matches = list(datefinder.find_dates(segment_clean))
-    # print(matches)
-
-    if len(matches) > 0:
-        date = matches[0] # date is of type datetime.datetime
-        return date.strftime("%m/%d/%Y, %H:%M:%S")
-    else:
-        return 'No dates found'
-
 
 
 if __name__ == "__main__":
     """projects = json.load(open('projects.json',))
@@ -67,7 +34,7 @@
             print(f"Failure: {project} does not exist")
             continue
         project_class = getattr(sys.modules[__name__], project)
-        if project == "CPAN" or project == "ubuntu" or project == "ubuntu_releases" or project == "manjaro" or project == "mxlinux" or project == "mxlinux_iso" or project == "slackware" or project == "trisquel" or project == "cran" or project == "ctan" or project == "gentooportage":
+        if project == "CPAN" or project == "ubuntu" or project == "ubuntu_releases" or project == "manjaro" or project == "mxlinux" or project == "mxlinux_iso" or project == "cran" or project == "ctan" or project == "gentooportage":
             checker_result = project_class.check(data, project, current_time)
             if checker_result:
                 print(f"Success: {project} up-to-date")
diff --git a/projects/trisquel.py b/projects/trisquel.py
index 2cea67d..fba7e76 100644
--- a/projects/trisquel.py
+++ b/projects/trisquel.py
@@ -1,26 +1,64 @@
-"""
-Contains trisquel class
-"""
-
-import os
+from bs4 import BeautifulSoup
+import requests
+import re
+import datefinder # another date finding library
 from project import Project
 from shared import CSC_MIRROR
-import requests
-import datefinder # another date finding library
-from datetime import timedelta
-from datetime import datetime
-import re
-import pandas as pd
 
 
 class trisquel(Project):
     """trisquel class"""
     @staticmethod
-    def check(data, project, current_time):
-        page = requests.get(data[project]["upstream"]).text
-        indexOfFile = page.find("mirror.csclub.uwaterloo.ca")
+    def checker(directory_URL, file_name):
+        page = requests.get(directory_URL).text
+        file_index = page.find(file_name)
+        # print(page)
 
-        m = re.search(r'(\d+ hour)|(\d+ hours)|(\d+(\.)?\d+ days)', page[indexOfFile:]) # solution from: https://stackoverflow.com/questions/21074100/how-to-convert-standard-timedelta-string-to-timedelta-object/21074460
+        if file_index == -1:
+            return False
 
-        duration = pd.to_timedelta(m.group(0))
+        str_dates = re.findall(r'(\d{2}-\w{3}-\d{4} \d{2}:\d{2})|(\d{4}-\d{2}-\d{2} \d{2}:\d{2})', page[file_index:])
+
+        return list(datefinder.find_dates("".join(str_dates[0])))[0]
+
+    @classmethod
+    def scrape(cls, site1, site2):
+        # getting the request from url
+        r1 = requests.get(site1)
+        r2 = requests.get(site2)
 
-        return duration <= pd.to_timedelta(data[project]["out_of_sync_interval"], unit='s')
\ No newline at end of file
+        # converting the text
+        s1 = BeautifulSoup(r1.text,"html.parser")
+        s2 = BeautifulSoup(r2.text,"html.parser")
+
+        hrefs1 = [i.attrs['href'] for i in s1.find_all("a")]
+        hrefs2 = [i.attrs['href'] for i in s2.find_all("a")]
+
+        for href in hrefs1: # for a href directories
+            if href.endswith("/") and href != "../" and href != "/" and not href.startswith("/"):
+                print(href)
+                if href not in hrefs2:
+                    return False
+                elif cls.checker(site1+href, "Release") > cls.checker(site2+href, "Release"):
+                    return False
+        return True
+
+    @classmethod
+    def check_iso(cls, site, mirrors):
+        for mirror in mirrors:
+            if cls.checker(site, "md5sum.txt") < cls.checker(mirror, "md5sum.txt"):
+                print(cls.checker(site, "md5sum.txt"))
+                print(cls.checker(mirror, "md5sum.txt"))
+                return False
+        return True
+
+    @classmethod
+    def check(cls, data, project):
+        """Check if project packages are up-to-date"""
+
+        csc_url = CSC_MIRROR + data[project]["csc"] + data[project]["file"]
+        upstream_url = data[project]["upstream"] + data[project]["file"]
+
+        # print(cls.check_iso(upstream_url+"slackware-iso/", csc_url+"slackware-iso/"))
+        mirrors = data[project]["mirrors"]
+
+        return cls.scrape(upstream_url, csc_url+"packages/dists/") and cls.check_iso(csc_url+"iso/", mirrors)
\ No newline at end of file
diff --git a/test.py b/test.py
index ea4a329..adc8e47 100644
--- a/test.py
+++ b/test.py
@@ -7,7 +7,7 @@ from datetime import timedelta
 import time
 import pandas as pd
 import re # for salt stack specifically
-from projects import slackware
+from projects import trisquel
 import json # import json to read project info stored in json file
 
 # this function is brute force looping through the whole directory and checking dates
@@ -65,7 +65,7 @@ def get_latest_date(web_dir):
 if __name__ =="__main__":
     with open("data.json", "r", encoding="utf-8") as file:
         data = json.load(file)
-    print(slackware.check(data, "slackware"))
+    print(trisquel.check(data, "trisquel"))
 
     """# website to be scrape
     site="https://cdimage.ubuntu.com/releases/"
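
Note (not part of the patch): the sketch below is a minimal, standalone illustration of the directory-index date scraping that the new trisquel.checker()/scrape() methods rely on: locate a file name in an Apache/nginx-style listing, pull the neighbouring "DD-Mon-YYYY HH:MM" or "YYYY-MM-DD HH:MM" timestamp, and compare the upstream archive against the CSC mirror. The dist name ("nabia") and the CSC base URL used here are assumptions for illustration, not values taken from the patch.

# Hypothetical example; requires the requests and datefinder packages.
import re
import requests
import datefinder

def listing_file_date(directory_url, file_name):
    # Fetch the directory index and locate the row for file_name.
    page = requests.get(directory_url, timeout=30).text
    file_index = page.find(file_name)
    if file_index == -1:
        return None
    # Directory indexes usually print either "17-Oct-2021 17:59" or "2021-10-17 17:59".
    match = re.search(r'\d{2}-\w{3}-\d{4} \d{2}:\d{2}|\d{4}-\d{2}-\d{2} \d{2}:\d{2}',
                      page[file_index:])
    if match is None:
        return None
    dates = list(datefinder.find_dates(match.group(0)))
    return dates[0] if dates else None

if __name__ == "__main__":
    # "nabia" and the CSC mirror base URL are assumed values for this example only.
    upstream = listing_file_date("http://rsync.trisquel.info/trisquel/dists/nabia/", "Release")
    csc = listing_file_date("https://mirror.csclub.uwaterloo.ca/trisquel/packages/dists/nabia/", "Release")
    print("upstream:", upstream, "csc:", csc)
    if upstream and csc:
        print("csc mirror up to date:", csc >= upstream)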