#!/usr/bin/python

import re
import json


"""
Examples:
1. - /r/place
   - r/place
2. /rplace
3. - https://www.reddit.com/r/place
   - www.reddit.com/r/place
   - reddit.com/r/place
UNUSED AND FAULTY
4. - https://place.reddit.com
   - place.reddit.com
5. - [https://place.reddit.com](https://place.reddit.com)
   - [place.reddit.com](https://place.reddit.com)
"""

FS_REGEX = {
    "commatization": r'( *(,+ +|,+ |,+)| +)(and|&|;)( *(,+ +|,+ |,+)| +)|, *$| +',
    "pattern1": r'\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
    "pattern2": r'^\/*[rR](?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
    "pattern3": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
    "pattern1user": r'\/*(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
    "pattern2user": r'^\/*(?:u|user)(?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
    "pattern3user": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
    "pattern1new": r'(?:(?:(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com)?\/)?[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
    # "pattern4": r'(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*',
    # "pattern5": r'\[(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\]\((?:https:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\)"',
}
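
# Quick illustration with a hypothetical input (not from the dataset):
#   re.sub(FS_REGEX["pattern3"], r"/r/\1", "https://www.reddit.com/r/place")  # -> "/r/place"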

VALIDATE_REGEX = {
    "subreddit": r'^ *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{1,20}) *(, *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{1,20}) *)*$|^$',
    "website": r'^https?://[^\s/$.?#].[^\s]*$|^$'
}
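
# Illustration (hypothetical values): VALIDATE_REGEX["subreddit"] accepts an empty
# string or a comma-separated list like "/r/place, /r/placestart", but not "r/place;".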

CL_REGEX = r'\[(.+?)\]\((.+?)\)'
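# Illustration (hypothetical value): on "[place.reddit.com](https://place.reddit.com)",
# CL_REGEX captures "place.reddit.com" (label) and "https://place.reddit.com" (target).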

CWTS_REGEX = {
    # Trailing slash is optional; requiring it would skip URLs like ".../r/place"
    "url": r'^(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/)?$',
    "subreddit": r'^\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})\/?$'
}
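
# Illustration (hypothetical values): "https://www.reddit.com/r/place/" matches
# CWTS_REGEX["url"] capturing "place"; "/r/place" matches CWTS_REGEX["subreddit"].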

CSTW_REGEX = {
    "website": r'^https?://[^\s/$.?#].[^\s]*$',
    "user": r'^\/*u\/([A-Za-z0-9][A-Za-z0-9_]{1,20})$'
}
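
# Illustration (hypothetical value): "/u/reddit" matches CSTW_REGEX["user"],
# capturing the username "reddit".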

# r/... to /r/...
SUBREDDIT_TEMPLATE = r"/r/\1"
USER_TEMPLATE = r"/u/\1"


def format_subreddit(entry: dict):
    """
    Fix formatting of the value on "subreddit".
    """
    if "subreddit" in entry and entry["subreddit"]:
        subredditLink = entry["subreddit"]
        subredditLink = re.sub(FS_REGEX["commatization"], ', ', subredditLink)
        subredditLink = re.sub(FS_REGEX["pattern3"], SUBREDDIT_TEMPLATE, subredditLink)
        subredditLink = re.sub(FS_REGEX["pattern1"], SUBREDDIT_TEMPLATE, subredditLink)
        subredditLink = re.sub(FS_REGEX["pattern2"], SUBREDDIT_TEMPLATE, subredditLink)
        subredditLink = re.sub(FS_REGEX["pattern3user"], USER_TEMPLATE, subredditLink)
        subredditLink = re.sub(FS_REGEX["pattern1user"], USER_TEMPLATE, subredditLink)
        subredditLink = re.sub(FS_REGEX["pattern2user"], USER_TEMPLATE, subredditLink)
        entry["subreddit"] = subredditLink

    if "links" in entry and "subreddit" in entry["links"]:
        for i in range(len(entry["links"]["subreddit"])):
            subredditLink = entry["links"]["subreddit"][i]
            subredditLink = re.sub(FS_REGEX["pattern3"], r"\1", subredditLink)
            subredditLink = re.sub(FS_REGEX["pattern1new"], r"\1", subredditLink)
            entry["links"]["subreddit"][i] = subredditLink

    return entry
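
# Sketch of the effect on a hypothetical entry:
#   format_subreddit({"subreddit": "reddit.com/r/place"})  # -> {"subreddit": "/r/place"}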


def collapse_links(entry: dict):
    """
    Collapses Markdown links.
    """
    if "website" in entry and entry['website']:
        website = entry["website"]
        match = re.search(CL_REGEX, website)
        if match and match.group(1) == match.group(2):
            website = match.group(2)
        entry["website"] = website

    elif "links" in entry and "website" in entry["links"]:
        for i in range(len(entry["links"]["website"])):
            website = entry["links"]["website"][i]
            match = re.search(CL_REGEX, website)
            if match and match.group(1) == match.group(2):
                website = match.group(2)
            entry["links"]["website"][i] = website

    if "subreddit" in entry and entry['subreddit']:
        subreddit = entry["subreddit"]
        match = re.search(CL_REGEX, subreddit)
        if match and match.group(1) == match.group(2):
            subreddit = match.group(2)
        entry["subreddit"] = subreddit

    elif "links" in entry and "subreddit" in entry["links"]:
        for i in range(len(entry["links"]["subreddit"])):
            subreddit = entry["links"]["subreddit"][i]
            match = re.search(CL_REGEX, subreddit)
            if match and match.group(1) == match.group(2):
                subreddit = match.group(2)
            entry["links"]["subreddit"][i] = subreddit

    return entry
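
# Sketch (hypothetical values): "[https://place.reddit.com](https://place.reddit.com)"
# collapses to "https://place.reddit.com"; "[Place](https://place.reddit.com)" is kept
# as-is because label and target differ.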


def remove_extras(entry: dict):
    """
    Removes unnecessary extra characters and converts select characters.
    """
    if "subreddit" in entry and entry["subreddit"]:
        # if not entry["subreddit"].startswith('/r/'):
        #     entry["subreddit"] = re.sub(r'^(.*)(?=\/r\/)', r'', entry["subreddit"])
        entry["subreddit"] = re.sub(r'[.,]+$', r'', entry["subreddit"])

    for key in entry:
        if not entry[key] or not isinstance(entry[key], str):
            continue

        # Leading and trailing spaces
        entry[key] = entry[key].strip()

        # Double characters
        entry[key] = re.sub(r' {2,}(?!\n)', r' ', entry[key])
        entry[key] = re.sub(r' {3,}\n', r' ', entry[key])
        entry[key] = re.sub(r'\n{3,}', r'\n\n', entry[key])
        # The replacement must be a plain "r/"; "\/" in a replacement string would
        # insert a literal backslash into the output
        entry[key] = re.sub(r'r\/{2,}', 'r/', entry[key])
        entry[key] = re.sub(r',{2,}', r',', entry[key])

        # Smart quotation marks
        entry[key] = re.sub(r'[\u201c\u201d]', '"', entry[key])
        entry[key] = re.sub(r'[\u2018\u2019]', "'", entry[key])

        # Pseudo-empty strings
        if entry[key] in ["n/a", "N/A", "na", "NA", "-", "null", "none", "None"]:
            entry[key] = ""

    return entry
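
# Sketch (hypothetical entry): remove_extras({"name": "  Place,,  "})
# -> {"name": "Place,"} (whitespace stripped, repeated commas collapsed).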


def remove_duplicate_points(entry: dict):
    """
    Removes points from paths that occur twice after each other.
    """
    if "path" not in entry:
        return entry

    if isinstance(entry['path'], list):
        path: list = entry['path']
        if not path:
            # Guard: empty paths are reported later by validate()
            return entry
        previous: list = path[0]
        for i in range(len(path)-1, -1, -1):
            current: list = path[i]
            if current == previous:
                path.pop(i)
            previous = current
    else:
        for key in entry['path']:
            path: list = entry['path'][key]
            if not path:
                continue
            previous: list = path[0]
            for i in range(len(path)-1, -1, -1):
                current: list = path[i]
                if current == previous:
                    path.pop(i)
                previous = current

    return entry
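
# Sketch (hypothetical entry): remove_duplicate_points({"path": [[0, 0], [0, 0], [1, 1]]})
# -> {"path": [[0, 0], [1, 1]]}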


def fix_r_caps(entry: dict):
    """
    Fixes capitalization of /r/. (/R/place -> /r/place)
    """
    if "description" not in entry or not entry['description']:
        return entry

    # Replacement strings must be raw; a plain '\1' is the control character \x01
    entry["description"] = re.sub(r'([^\w]|^)\/R\/', r'\1/r/', entry["description"])
    entry["description"] = re.sub(r'([^\w]|^)R\/', r'\1r/', entry["description"])

    return entry
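
# Sketch (hypothetical entry): fix_r_caps({"description": "See /R/place"})
# -> {"description": "See /r/place"}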


def fix_no_protocol_urls(entry: dict):
    """
    Fixes URLs with no protocol by adding the "https://" protocol.
    """
    if "links" in entry and "website" in entry['links']:
        for i in range(len(entry["links"]["website"])):
            if entry["links"]["website"][i] and not entry["links"]["website"][i].startswith("http"):
                entry["links"]["website"][i] = "https://" + entry["links"]["website"][i]
    elif "website" in entry and entry['website']:
        if not entry["website"].startswith("http"):
            entry["website"] = "https://" + entry["website"]

    return entry
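
# Sketch (hypothetical entry): fix_no_protocol_urls({"website": "place.reddit.com"})
# -> {"website": "https://place.reddit.com"}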


def convert_website_to_subreddit(entry: dict):
    """
    Converts the subreddit link on "website" to "subreddit" if possible.
    """
    if "links" in entry and "website" in entry["links"]:
        for i in range(len(entry["links"]["website"])):
            if re.match(CWTS_REGEX["url"], entry["links"]["website"][i]):
                new_subreddit = re.sub(CWTS_REGEX["url"], r"\1", entry["links"]["website"][i])
                # .get() guards against entries that have no "subreddit" list yet
                if new_subreddit in entry["links"].get("subreddit", []):
                    entry["links"]["website"][i] = ""
                elif "subreddit" not in entry["links"] or len(entry["links"]["subreddit"]) == 0:
                    if "subreddit" not in entry["links"]:
                        entry["links"]["subreddit"] = []
                    entry["links"]["subreddit"].append(new_subreddit)
                    entry["links"]["website"][i] = ""
            elif re.match(CWTS_REGEX["subreddit"], entry["links"]["website"][i]):
                new_subreddit = re.sub(CWTS_REGEX["subreddit"], r"\1", entry["links"]["website"][i])
                if new_subreddit in entry["links"].get("subreddit", []):
                    entry["links"]["website"][i] = ""
                elif "subreddit" not in entry["links"] or len(entry["links"]["subreddit"]) == 0:
                    if "subreddit" not in entry["links"]:
                        entry["links"]["subreddit"] = []
                    entry["links"]["subreddit"].append(new_subreddit)
                    entry["links"]["website"][i] = ""
    elif "website" in entry and entry['website']:
        if re.match(CWTS_REGEX["url"], entry["website"]):
            new_subreddit = re.sub(CWTS_REGEX["url"], SUBREDDIT_TEMPLATE, entry["website"])
            # .get() avoids a KeyError when "subreddit" is absent
            if new_subreddit.lower() == entry.get("subreddit", "").lower():
                entry["website"] = ""
            elif not entry.get("subreddit"):
                entry["subreddit"] = new_subreddit
                entry["website"] = ""
        elif re.match(CWTS_REGEX["subreddit"], entry["website"]):
            new_subreddit = re.sub(CWTS_REGEX["subreddit"], SUBREDDIT_TEMPLATE, entry["website"])
            if new_subreddit.lower() == entry.get("subreddit", "").lower():
                entry["website"] = ""
            elif not entry.get("subreddit"):
                entry["subreddit"] = new_subreddit
                entry["website"] = ""

    return entry
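
# Sketch (hypothetical entry):
#   convert_website_to_subreddit({"website": "https://www.reddit.com/r/place/", "subreddit": ""})
#   # -> {"website": "", "subreddit": "/r/place"}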


def convert_subreddit_to_website(entry: dict):
    """
    Converts the links on "subreddit" to a "website" if needed. This also supports Reddit users (/u/reddit).
    """
    if "links" in entry and "subreddit" in entry["links"]:
        for i in range(len(entry["links"]["subreddit"])):
            if re.match(CSTW_REGEX["website"], entry["links"]["subreddit"][i]):
                if "website" in entry["links"] and entry["links"]["subreddit"][i] in entry["links"]["website"]:
                    entry["links"]["subreddit"][i] = ""
                elif "website" not in entry["links"] or len(entry["links"]["website"]) == 0:
                    if "website" not in entry["links"]:
                        entry["links"]["website"] = []
                    entry["links"]["website"].append(entry["links"]["subreddit"][i])
                    entry["links"]["subreddit"][i] = ""
            elif re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]):
                if "website" not in entry["links"] or len(entry["links"]["website"]) == 0:
                    username = re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]).group(1)
                    if "website" not in entry["links"]:
                        entry["links"]["website"] = []
                    entry["links"]["website"].append("https://www.reddit.com/user/" + username)
                    entry["links"]["subreddit"][i] = ""
    elif "subreddit" in entry and entry['subreddit']:
        if re.match(CSTW_REGEX["website"], entry["subreddit"]):
            # .get() avoids a KeyError when "website" is absent
            if entry.get("website", "").lower() == entry["subreddit"].lower():
                entry["subreddit"] = ""
            elif "website" not in entry or entry['website'] == "":
                entry["website"] = entry["subreddit"]
                entry["subreddit"] = ""
        elif re.match(CSTW_REGEX["user"], entry["subreddit"]):
            if "website" not in entry or entry['website'] == "":
                username = re.match(CSTW_REGEX["user"], entry["subreddit"]).group(1)
                entry["website"] = "https://www.reddit.com/user/" + username
                entry["subreddit"] = ""

    return entry
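
# Sketch (hypothetical entry):
#   convert_subreddit_to_website({"subreddit": "/u/reddit", "website": ""})
#   # -> {"subreddit": "", "website": "https://www.reddit.com/user/reddit"}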


def calculate_center(path: list):
    """
    Calculates the center of a polygon.

    Adapted from /web/_js/draw.js:calucalteCenter()
    """
    area = 0
    x = 0
    y = 0

    for i in range(len(path)):
        point1 = path[i]
        point2 = path[i-1 if i != 0 else len(path)-1]
        f = point1[0] * point2[1] - point2[0] * point1[1]
        area += f
        x += (point1[0] + point2[0]) * f
        y += (point1[1] + point2[1]) * f

    area *= 3

    if area != 0:
        return [x // area + 0.5, y // area + 0.5]
    else:
        # get the center of a straight line
        max_x = max(i[0] for i in path)
        min_x = min(i[0] for i in path)
        max_y = max(i[1] for i in path)
        min_y = min(i[1] for i in path)
        return [(max_x + min_x) // 2 + 0.5, (max_y + min_y) // 2 + 0.5]
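
# The loop above is the standard shoelace centroid: each f is an edge cross product,
# their sum is twice the signed area, and x/area and y/area (after area *= 3) give
# the centroid. Worked example (hypothetical square):
#   calculate_center([[0, 0], [2, 0], [2, 2], [0, 2]])  # -> [1.5, 1.5]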


def update_center(entry: dict):
    """
    Checks if the center of an entry is up to date, and updates it if it's either missing or outdated.
    """
    if 'path' not in entry:
        return entry

    if isinstance(entry['path'], list):
        path = entry['path']
        if len(path) > 1:
            calculated_center = calculate_center(path)
            if 'center' not in entry or entry['center'] != calculated_center:
                entry['center'] = calculated_center
    else:
        for key in entry['path']:
            path = entry['path'][key]
            if len(path) > 1:
                calculated_center = calculate_center(path)
                if 'center' not in entry or key not in entry['center'] or entry['center'][key] != calculated_center:
                    if 'center' not in entry:
                        # Initialize the per-period center map before writing to it
                        entry['center'] = {}
                    entry['center'][key] = calculated_center

    return entry
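
# Sketch (hypothetical entry): update_center({"path": [[0, 0], [2, 0], [2, 2], [0, 2]]})
# adds "center": [1.5, 1.5].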


def remove_empty_and_similar(entry: dict):
    """
    Removes empty items and case-insensitive duplicates on link lists, usually left over from the past formattings.
    """
    if "links" in entry:
        for key in entry["links"]:
            seen = set()
            kept = []
            for x in entry["links"][key]:
                # Keep non-empty items, dropping case-insensitive duplicates ("similar" items)
                if x and x.lower() not in seen:
                    seen.add(x.lower())
                    kept.append(x)
            entry["links"][key] = kept

    return entry
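
# Sketch (hypothetical entry, assuming the case-insensitive dedup above):
#   remove_empty_and_similar({"links": {"subreddit": ["place", "", "Place"]}})
#   # -> {"links": {"subreddit": ["place"]}}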


def validate(entry: dict):
    """
    Validates the entry. Catches errors and prints warnings related to the entry.

    Status code key:
    0: All valid, no problems
    1: Informational logs that may be ignored
    2: Warnings that may affect user experience when interacting with the entry
    3: Errors that make the entry inaccessible or broken.
    """
    return_status = 0

    if "id" not in entry or (not entry['id'] and entry['id'] != 0):
        print(f"Wait, no id here! How did this happen? {entry}")
        return_status = 3
        entry['id'] = '[MISSING_ID]'

    if "path" in entry:
        if isinstance(entry['path'], list):
            if len(entry["path"]) == 0:
                print(f"Entry {entry['id']} has no points!")
                return_status = 3
            elif len(entry["path"]) < 3:
                print(f"Entry {entry['id']} only has {len(entry['path'])} point(s)!")
                return_status = 3
        else:
            for key in entry['path']:
                path = entry['path'][key]
                if len(path) == 0:
                    print(f"Period {key} of entry {entry['id']} has no points!")
                    return_status = 3
                elif len(path) < 3:
                    print(f"Period {key} of entry {entry['id']} only has {len(path)} point(s)!")
                    return_status = 3
    else:
        print(f"Entry {entry['id']} has no path at all!")
        return_status = 3

    for key in entry:
        if key in VALIDATE_REGEX and not re.match(VALIDATE_REGEX[key], entry[key]):
            if return_status < 2:
                return_status = 2
            print(f"{key} of entry {entry['id']} is still invalid! {entry[key]}")

    return return_status
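
# Sketch (hypothetical entries): validate({"id": 1, "path": [[0, 0], [1, 0], [1, 1]]})
# returns 0, while validate({"id": 2, "path": []}) prints a warning and returns 3.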


def per_line_entries(entries: list):
    """
    Returns a string of all the entries, with every entry in one line.
    """
    out = "[\n"
    for entry in entries:
        if entry:
            out += json.dumps(entry, ensure_ascii=False) + ",\n"
    out = out[:-2] + "\n]"
    return out
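
# Sketch (hypothetical entries): per_line_entries([{"id": 1}, {"id": 2}])
# -> '[\n{"id": 1},\n{"id": 2}\n]'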


def format_all(entry: dict, silent=False):
    """
    Format using all the available formatters.
    Outputs a tuple containing the entry and the validation status code.

    Status code key:
    0: All valid, no problems
    1: Informational logs that may be ignored
    2: Warnings that may affect user experience when interacting with the entry
    3: Errors that make the entry inaccessible or broken.
    """
    def print_(*args, **kwargs):
        if not silent:
            print(*args, **kwargs)

    print_("Fixing r/ capitalization...")
    entry = fix_r_caps(entry)
    print_("Fixing formatting of subreddit...")
    entry = format_subreddit(entry)
    print_("Collapsing Markdown links...")
    entry = collapse_links(entry)
    print_("Converting website links to subreddit (if possible)...")
    entry = convert_website_to_subreddit(entry)
    print_("Converting subreddit links to website (if needed)...")
    entry = convert_subreddit_to_website(entry)
    print_("Fixing links without protocol...")
    entry = fix_no_protocol_urls(entry)
    print_("Removing extras...")
    entry = remove_extras(entry)
    print_("Removing duplicate points...")
    entry = remove_duplicate_points(entry)
    print_("Updating center...")
    entry = update_center(entry)
    print_("Removing empty items...")
    entry = remove_empty_and_similar(entry)
    print_("Validating...")
    status_code = validate(entry)
    print_("Completed!")
    return (entry, status_code)
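
# Typical usage (hypothetical entry):
#   entry, status = format_all({"id": 1, "path": [[0, 0], [1, 0], [1, 1]]}, silent=True)
#   # status 0 means valid; 3 marks the entry for removal in the __main__ block below.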


if __name__ == '__main__':

    def go(path):

        print(f"Formatting {path}...")

        with open(path, "r+", encoding='UTF-8') as f1:
            entries = json.loads(f1.read())

        for i in range(len(entries)):
            entry_formatted, validation_status = format_all(entries[i], True)
            if validation_status > 2:
                print(f"Entry {entry_formatted['id']} will be removed! {json.dumps(entry_formatted)}")
                entries[i] = None
            else:
                entries[i] = entry_formatted
            if not (i % 500):
                print(f"{i} checked.")

        print(f"{len(entries)} checked.")

        with open(path, "w", encoding='UTF-8') as f2:
            f2.write(per_line_entries(entries))

        print("Writing completed. All done.")

    go("../web/atlas.json")