|
|
|
from bs4 import BeautifulSoup
|
|
|
|
import requests
|
|
|
|
from project import Project
|
|
|
|
from shared import CSC_MIRROR
|
|
|
|
|
|
|
|
# This checker brute-forces its way through the whole directory listing and compares entries.
# That may sound inefficient, but for certain distros it is genuinely the best available approach.
|
|
|
|
|
|
|
|
class raspberrypi(Project):
    """Mirror-status checker for the Raspberry Pi package repositories.

    Walks the top-level directory listing on both the CSC mirror and the
    upstream mirror, collects the per-directory ``Release`` file URLs, and
    compares their contents pairwise.
    """

    # Seconds before a network request is abandoned. Without a timeout,
    # requests.get() can block forever on a stalled mirror.
    TIMEOUT = 30

    @staticmethod
    def scrape(urls, site):
        """Collect the ``Release``-file URL of every subdirectory listed at *site*.

        Fetches the directory index at *site* and, for each linked
        subdirectory, appends ``site + subdir + "Release"`` to *urls*
        in place (duplicates are skipped). Returns None.
        """
        # fetch the directory index; timeout so a dead mirror cannot hang us
        r = requests.get(site, timeout=raspberrypi.TIMEOUT)

        # parse the HTML listing
        s = BeautifulSoup(r.text, "html.parser")

        for i in s.find_all("a"):  # for a href directories
            # .get() instead of ['href']: an anchor without an href
            # attribute would otherwise raise KeyError
            href = i.attrs.get('href', '')

            # only descend into real subdirectories, skipping the
            # parent-directory and root links
            if href.endswith("/") and href != "../" and href != "/":
                site_next = site + href + "Release"

                if site_next not in urls:
                    urls.append(site_next)

    @classmethod
    def check(cls, data, project, current_time):
        """Check if project packages are up-to-date.

        Builds the list of per-directory ``Release`` files on the CSC
        mirror and on upstream, then returns True only when both sides
        list the same number of directories and every Release file's
        content matches; False otherwise.
        """
        urls1 = []  # Release-file URLs found on the CSC mirror
        urls2 = []  # Release-file URLs found upstream

        csc_url = CSC_MIRROR + data[project]["csc"] + data[project]["file"]
        upstream_url = data[project]["upstream"] + data[project]["file"]

        # populate urls1/urls2 from the two directory listings
        cls.scrape(urls1, csc_url)
        cls.scrape(urls2, upstream_url)

        # differing directory counts already mean the mirrors disagree
        if len(urls1) != len(urls2):
            return False

        # sort both lists so entries line up pairwise by directory name
        urls1.sort()
        urls2.sort()
        for index, f in enumerate(urls1):
            # comparing the file content bc that's how the base class does it,
            # but we can speed it up by just comparing the dates
            if requests.get(f, timeout=cls.TIMEOUT).text != requests.get(urls2[index], timeout=cls.TIMEOUT).text:
                return False
        return True
|