Check whether our mirror packages are up to date.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
mirror-checker/projects/linuxmint_packages.py

56 lines
1.8 KiB

from bs4 import BeautifulSoup
import requests
from project import Project
from shared import CSC_MIRROR
# This check brute-forces its way through one level of the directory listing
# and compares files; it may sound horrible, but for certain distros it is
# indeed the simplest reliable approach.
class linuxmint_packages(Project):
    """Mirror freshness check for Linux Mint packages.

    Compares the ``Release`` files one directory level below the CSC
    mirror path against the same files upstream.
    """

    @staticmethod
    def scrape(urls, site):
        """Collect ``<subdir>/Release`` URLs one level below *site*.

        Fetches the HTML directory listing at *site* and, for every
        subdirectory link found, appends ``site + subdir + "Release"``
        to *urls* in place (skipping duplicates).
        """
        # requests has NO default timeout: without one, a single dead
        # mirror would hang the whole checker forever.
        r = requests.get(site, timeout=30)
        s = BeautifulSoup(r.text, "html.parser")
        # salt stack specific code
        # s = s.find("div", {"id": "listing"})
        for anchor in s.find_all("a"):  # every link in the listing
            # .get() instead of .attrs[...]: an <a> without href (e.g. a
            # named anchor) would otherwise raise KeyError.
            href = anchor.get("href")
            # Only descend into real subdirectories, not the parent/root links.
            if href and href.endswith("/") and href not in ("../", "/"):
                site_next = site + href + "Release"
                if site_next not in urls:
                    urls.append(site_next)

    @classmethod
    def check(cls, data, project, current_time):
        """Check if project packages are up-to-date.

        Returns True iff the CSC mirror and upstream expose the same set
        of per-subdirectory Release files with identical contents.
        """
        urls1 = []  # Release URLs found on the CSC mirror
        urls2 = []  # Release URLs found upstream
        csc_url = CSC_MIRROR + data[project]["csc"] + data[project]["file"]
        upstream_url = data[project]["upstream"] + data[project]["file"]
        cls.scrape(urls1, csc_url)
        cls.scrape(urls2, upstream_url)
        # A differing number of subdirectories already means out of sync.
        if len(urls1) != len(urls2):
            return False
        # Each list shares a constant URL prefix, so sorting aligns the
        # two lists by their common path suffix.
        urls1.sort()
        urls2.sort()
        for ours, theirs in zip(urls1, urls2):
            # comparing the file content bc that's how the base class does it,
            # but we can speed it up by just comparing the dates
            if requests.get(ours, timeout=30).text != requests.get(theirs, timeout=30).text:
                return False
        return True