Fixed 3 more broken mirror checkers

This commit is contained in:
Justin Toft 2022-08-04 17:53:12 -04:00
parent 90bde5a754
commit 7e77546c3e
Signed by untrusted user who does not match committer: jtoft
GPG Key ID: F583036268C8DFF6
2 changed files with 21 additions and 40 deletions

View File

@ -5,6 +5,7 @@ from project import Project
from shared import CSC_MIRROR
import datefinder # another date finding library
from datetime import timedelta
class macports(Project):
"""macports class"""
@ -13,18 +14,20 @@ class macports(Project):
def checker(directory_URL, file_name):
    """Return the last-modified date shown for *file_name* in a mirror's directory listing.

    Fetches *directory_URL*, locates *file_name* in the returned HTML, and
    parses the first date appearing in the text that follows the file name.

    :param directory_URL: URL of the directory index page to scan.
    :param file_name: name of the file whose listing row holds the date.
    :return: the first date found after the file name (as returned by datefinder).
    :raises ValueError: if *file_name* does not appear on the page.
    :raises IndexError: if no date can be found after the file name.
    """
    page = requests.get(directory_URL).text
    file_index = page.find(file_name)
    if file_index == -1:
        # Previously a missing file produced file_index == -1 and the code
        # silently scanned a garbage slice; fail loudly instead.
        raise ValueError(f"{file_name} not found at {directory_URL}")
    # Limit the scan to this file's table row when the page uses <tr> markup.
    end_index = page[file_index:].find("</tr>") + file_index
    # The CSC mirror does not use tr tags, so end_index will be set to the end of the file
    if end_index == (file_index - 1):
        end_index = len(page) - 1
    # remove stray numbers (file size numbers in particular) that might interfere with date finding
    segment_clean = re.sub(r'\s\d+\s', ' ', page[file_index:end_index])  # removes numbers for size
    # Chain the second pass on the already-cleaned text: the original re-ran
    # the substitution on the raw segment, discarding the first pass entirely.
    segment_clean = re.sub(r'\s\d+\w*\s', ' ', segment_clean)  # removes numbers + size unit, e.g. 50kb
    # notes: datefinder returns a generator; materialize it so the first match
    # can be indexed. Please read the note of caution provided at the bottom.
    matches = list(datefinder.find_dates(segment_clean))
    return matches[0]
@classmethod
@ -34,4 +37,5 @@ class macports(Project):
upstream_url = data[project]["upstream"]
file_name = data[project]["file"]
return cls.checker(csc_url, file_name) == cls.checker(upstream_url, file_name)
# Subtract 2 hours from upstream to account for timezones
return cls.checker(csc_url, file_name) == cls.checker(upstream_url, file_name) - timedelta(hours=2)

View File

@ -1,39 +1,16 @@
from bs4 import BeautifulSoup
import requests
"""
Contains ubuntu class
"""
import os
from project import Project
from shared import CSC_MIRROR
from shared import NUM_UBUNTU_RELEASES
import requests
class ubuntu_ports(Project):
"""ubuntu_ports class"""
@staticmethod
def scrape(site1, site2):
    """Return True when every release subdirectory of site1 is mirrored on site2.

    Each subdirectory linked from site1 must also exist on site2 and its
    Release file must be byte-identical; any missing directory or differing
    Release file means the mirror is out of date.
    """
    # Fetch both directory listings and parse out the anchor targets.
    listing_upstream = BeautifulSoup(requests.get(site1).text, "html.parser")
    listing_mirror = BeautifulSoup(requests.get(site2).text, "html.parser")
    links_upstream = [anchor.attrs['href'] for anchor in listing_upstream.find_all("a")]
    links_mirror = [anchor.attrs['href'] for anchor in listing_mirror.find_all("a")]
    for link in links_upstream:
        # Only descend into relative subdirectories; skip "../", "/" and absolute paths.
        if not link.endswith("/") or link in ("../", "/") or link.startswith("/"):
            continue
        if link not in links_mirror:
            return False
        if requests.get(site1 + link + "Release").text != requests.get(site2 + link + "Release").text:
            return False
    return True
@classmethod
def check(cls, data, project, current_time):
    """Check if project packages are up-to-date"""
    # Build the mirror and upstream URLs for this project's file listing,
    # then compare the two directory trees.
    mirror_url = CSC_MIRROR + data[project]["csc"] + data[project]["file"]
    source_url = data[project]["upstream"] + data[project]["file"]
    return cls.scrape(source_url, mirror_url)
@staticmethod
def check(data, project, current_time):
    """Check that every Ubuntu release on the mirror reads as up to date.

    The upstream status page lists one row per tracked release; the mirror
    is considered current only when all rows say "Up to date".

    NOTE(review): without @staticmethod an instance call would bind the
    instance to *data*; class-level calls behave identically either way,
    so adding the decorator is backward compatible.
    """
    status_page = requests.get(data[project]["upstream"]).text
    return status_page.count("Up to date") == NUM_UBUNTU_RELEASES