updated slackware

This commit is contained in:
Tom 2021-10-17 15:56:42 -07:00
parent 68e13c327e
commit fb7337457e
5 changed files with 75 additions and 27 deletions

View File

@ -74,11 +74,11 @@ raspbian: http://archive.raspbian.org/raspbian/ snapshotindex.txt is most likely
sagemath: same source tarballs as them (the sage-*.tar.gz files under 'Source Code') sagemath: same source tarballs as them (the sage-*.tar.gz files under 'Source Code')
salt stack: checking the "Latest release" text under the 'About' header salt stack: checking the "Latest release" text under the 'About' header
scientific: https://scientificlinux.org/downloads/sl-mirrors/ not checking this one since it's abandoned scientific: https://scientificlinux.org/downloads/sl-mirrors/ not checking this one since it's abandoned
slackware: https://mirrors.slackware.com/mirrorlist/ https://mirrors.slackware.com/slackware/ checking using the last updated date here, don't know if it's entirely accurate slackware: https://mirrors.slackware.com/slackware/ check whether we have each release and whether the timestamp for CHECKSUMS.md5 in each release is the same, for slackware-iso, just make sure that our list of directories is the same
tdf: https://download.documentfoundation.org/ tdf: https://download.documentfoundation.org/
trisquel: https://trisquel.info/mirmon/index.html out of date website!? please recheck this!!! trisquel: https://trisquel.info/mirmon/index.html out of date website!? please recheck this!!!
ubuntu: https://launchpad.net/ubuntu/+mirror/mirror.csclub.uwaterloo.ca-archive ubuntu: https://launchpad.net/ubuntu/+mirror/mirror.csclub.uwaterloo.ca-archive
ubuntu-ports: http://ports.ubuntu.com/ubuntu-ports/ checks the file anonster.canonical.com, which appears to be a timestamp (check it to make sure!!!) ubuntu-ports: http://ports.ubuntu.com/ubuntu-ports/ checking the Release files in dists
ubuntu-ports-releases: https://cdimage.ubuntu.com/releases/ has public repo, no timestamp, no status tracker, brute force looped it ubuntu-ports-releases: https://cdimage.ubuntu.com/releases/ has public repo, no timestamp, no status tracker, brute force looped it
ubuntu-releases: https://releases.ubuntu.com/ ubuntu-releases: https://releases.ubuntu.com/
vlc: http://download.videolan.org/pub/videolan/ vlc: http://download.videolan.org/pub/videolan/

View File

@ -266,8 +266,8 @@
"slackware": { "slackware": {
"out_of_sync_since": null, "out_of_sync_since": null,
"out_of_sync_interval": 86400, "out_of_sync_interval": 86400,
"csc": "", "csc": "slackware/",
"upstream": "https://mirrors.slackware.com/mirrorlist/", "upstream": "https://mirrors.slackware.com/slackware/",
"file": "" "file": ""
}, },
"trisquel": { "trisquel": {

View File

@ -1,26 +1,74 @@
""" from bs4 import BeautifulSoup
Contains slackware class import requests
""" import re
import datefinder # another date finding library
import os
from project import Project from project import Project
from shared import CSC_MIRROR from shared import CSC_MIRROR
import requests
import datefinder # another date finding library
from datetime import timedelta
from datetime import datetime
import re
import pandas as pd
class slackware(Project):
    """slackware mirror-status checker.

    Compares the CSC mirror against upstream (https://mirrors.slackware.com/slackware/)
    by checking that every release directory exists on the mirror with the
    same listed timestamp for its CHECKSUMS.md5, and that the mirror carries
    the same set of subdirectories under slackware-iso/.
    """

    @staticmethod
    def checker(directory_URL, file_name):
        """Return the timestamp shown next to *file_name* in the HTML
        directory listing at *directory_URL*.

        Returns False when the file does not appear in the listing (or no
        date could be parsed next to it); otherwise a datetime.datetime.
        The mixed return type is intentional: callers only compare two
        checker() results with !=, so False vs. datetime still behaves
        correctly (both-missing compares equal, one-missing compares unequal).
        """
        page = requests.get(directory_URL).text
        file_index = page.find(file_name)
        if file_index == -1:
            return False
        # Directory listings use either "DD-Mon-YYYY HH:MM" or
        # "YYYY-MM-DD HH:MM"; the alternation yields a 2-tuple per match
        # with one empty group, so "".join() collapses it to the real date.
        str_dates = re.findall(
            r'(\d{2}-\w{3}-\d{4} \d{2}:\d{2})|(\d{4}-\d{2}-\d{2} \d{2}:\d{2})',
            page[file_index:])
        if not str_dates:
            # File is listed but no recognizable date follows it; treat the
            # timestamp as unavailable instead of raising IndexError.
            return False
        return list(datefinder.find_dates("".join(str_dates[0])))[0]

    @staticmethod
    def _hrefs(url):
        """Fetch *url* and return the href of every <a> tag on the page."""
        soup = BeautifulSoup(requests.get(url).text, "html.parser")
        return [anchor.attrs['href'] for anchor in soup.find_all("a")]

    @classmethod
    def scrape(cls, site1, site2):
        """Compare release directories between upstream *site1* and the
        mirror *site2*.

        Every release directory present upstream must also exist on the
        mirror with an identically-timestamped CHECKSUMS.md5. Parent/self
        links, absolute hrefs, ancient releases (slackware-1 .. slackware-8.0),
        slackware-iso/ (handled by check_iso), slackware-current/,
        the pre-1.0 beta and unsupported/ are skipped.

        Returns True when everything matches, False on the first mismatch.
        """
        mirror_hrefs = cls._hrefs(site2)
        excluded = {"slackware-iso/", "slackware-current/",
                    "slackware-pre-1.0-beta/", "unsupported/"}
        for href in cls._hrefs(site1):
            if (href.endswith("/")
                    and href not in ("../", "/")
                    and not href.startswith("/")
                    and not re.match(r'slackware-([1-7]|8\.0).*', href)
                    and href not in excluded):
                if href not in mirror_hrefs:
                    return False
                if (cls.checker(site1 + href, "CHECKSUMS.md5")
                        != cls.checker(site2 + href, "CHECKSUMS.md5")):
                    return False
        return True

    @classmethod
    def check_iso(cls, site1, site2):
        """Check that every subdirectory of upstream's slackware-iso/
        listing (*site1*) also exists on the mirror (*site2*).

        Only directory presence is compared here — no timestamps.
        Returns True when the mirror has every upstream directory.
        """
        mirror_hrefs = cls._hrefs(site2)
        for href in cls._hrefs(site1):
            if (href.endswith("/")
                    and href not in ("../", "/")
                    and not href.startswith("/")
                    and not href.startswith("http")):
                if href not in mirror_hrefs:
                    return False
        return True

    @classmethod
    def check(cls, data, project):
        """Check if project packages are up-to-date.

        data[project] supplies the "csc" path suffix, the "upstream" base
        URL and the "file" component (see data.json). Returns True only
        when both the release directories and the slackware-iso listing
        match upstream.
        """
        csc_url = CSC_MIRROR + data[project]["csc"] + data[project]["file"]
        upstream_url = data[project]["upstream"] + data[project]["file"]
        return (cls.scrape(upstream_url, csc_url)
                and cls.check_iso(upstream_url + "slackware-iso/",
                                  csc_url + "slackware-iso/"))

View File

@ -21,7 +21,7 @@ class ubuntu_ports(Project):
for href in hrefs1: # for a href directories for href in hrefs1: # for a href directories
if href.endswith("/") and href != "../" and href != "/" and not href.startswith("/"): if href.endswith("/") and href != "../" and href != "/" and not href.startswith("/"):
print(href) # print(href)
if href not in hrefs2: if href not in hrefs2:
return False return False
elif requests.get(site1+href+"Release").text != requests.get(site2+href+"Release").text: elif requests.get(site1+href+"Release").text != requests.get(site2+href+"Release").text:

View File

@ -7,7 +7,7 @@ from datetime import timedelta
import time import time
import pandas as pd import pandas as pd
import re # for salt stack specifically import re # for salt stack specifically
from projects import ubuntu_ports from projects import slackware
import json # import json to read project info stored in json file import json # import json to read project info stored in json file
# this function is brute force looping through the whole directory and checking dates # this function is brute force looping through the whole directory and checking dates
@ -65,7 +65,7 @@ def get_latest_date(web_dir):
if __name__ =="__main__": if __name__ =="__main__":
with open("data.json", "r", encoding="utf-8") as file: with open("data.json", "r", encoding="utf-8") as file:
data = json.load(file) data = json.load(file)
print(ubuntu_ports.check(data, "ubuntu_ports")) print(slackware.check(data, "slackware"))
"""# website to be scrape """# website to be scrape
site="https://cdimage.ubuntu.com/releases/" site="https://cdimage.ubuntu.com/releases/"