Merge remote-tracking branch 'upstream/remaster' into interface

mxdanger committed 2022-04-21 17:33:30 -07:00
commit c34af77348
228 changed files with 3474 additions and 1863 deletions

.gitattributes (vendored, new file, +24 lines)

@ -0,0 +1,24 @@
* text=auto
*.bash text eol=lf
*.bat text eol=crlf
*.cmd text eol=crlf
*.css text diff=css
*.html text diff=html
*.js text
*.json text eol=lf
*.py text diff=python
*.sh text eol=lf
*.ico binary
*.jpg binary
*.jpeg binary
*.png binary
*.svg text
*.webp binary
*.ttf binary
*.eot binary
*.otf binary
*.woff binary
*.woff2 binary
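The rules above pin shell, Bash, and JSON sources to LF while batch files stay CRLF. A minimal sketch (hypothetical helper, not part of this commit) that flags stray CRLF bytes in files the config expects to be LF:

# Hypothetical check, not part of this commit: flag CRLF bytes in files
# that the .gitattributes rules above normalize to LF.
from pathlib import Path

LF_ONLY_SUFFIXES = {".bash", ".json", ".sh"}

for path in Path(".").rglob("*"):
    if path.is_file() and path.suffix in LF_ONLY_SUFFIXES:
        if b"\r\n" in path.read_bytes():
            print(f"CRLF found in {path}")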

.github/CODEOWNERS (vendored, new file, +1 line)

@ -0,0 +1 @@
web/atlas.json @placeAtlas/archival-team


@ -1,2 +0,0 @@
/*
  Access-Control-Allow-Origin: *

data/read-ids.txt

@ -8674,3 +8674,180 @@ u1j2if
u1in4u
u1icud
u2fp8n
u314dc
u2ziyc
u2zhof
u2zeqw
u2zbxw
u2zau5
u2z9u9
u2z8vq
u2z7yd
u2z5w0
u2z3h7
u2z1fk
u2z0i7
u2yyuq
u2yj7i
u2y7tn
u2wno2
u2w68c
u2w5d9
u2vtk6
u2vsc5
u2vqdl
u2veh4
u2vccn
u2upjt
u2teqf
u2ta3t
u2scps
u2s6ko
u2s2hq
u2rp04
u2oe4z
u2nm4h
u2mglu
u2mes6
u2kgt0
u2kgkx
u2kgam
u2jx5f
u2jwm8
u2jw9y
u2jv33
u2jpqe
u2j9e2
u2j34d
u2isv1
u2iljx
u2ikti
u2id8k
u2iar2
u2iaaj
u2i8dt
u2i77s
u2i6pj
u2i52l
u2i4c8
u2i3n3
u2i2s5
u2i2ay
u2i18l
u2hx8q
u2hw35
u2hq4n
u2hoxw
u2hndh
u2hmv9
u2hjn8
u2hfb1
u2heem
u2h3d5
u2h2uj
u2h2ba
u2gxgd
u2gmz0
u2gkze
u2gjfb
u2ggwy
u2gdu6
u2g3ho
u39w0p
u3956r
u38yox
u38y8w
u38xtp
u38x97
u38wn0
u38w7l
u38siz
u38rko
u38glf
u38fnb
u38eza
u38b71
u38ang
u38a1v
u389gh
u388kv
u385vc
u3856f
u384h1
u383k2
u382nl
u381h1
u37bs2
u37bal
u37b2x
u37abx
u37196
u3703q
u36yic
u36w1e
u36k9o
u360ch
u35z7z
u35y99
u352sv
u35255
u3505v
u34wnr
u343uv
u342ys
u3426y
u341c5
u33yqz
u33y72
u33tjn
u33syf
u33n2r
u40u61
u40diy
u402pe
u3zfyg
u3zbgo
u3z8s5
u3z0ym
u3yzud
u3yvxk
u3yuzv
u3ytml
u3ysji
u3yepe
u3xupi
u3xn44
u3xlz8
u3wnsl
u3wmdt
u3wlmr
u3wkqe
u3wigu
u3wf0o
u3wcn9
u3wbpf
u3umji
u3uftd
u3ueag
u3udwc
u3u038
u3sxkx
u3sq78
u3qf4m
u3pe7k
u3pc5u
u3o3ls
u3nhfo
u3lnul
u3llih
u3kmx3
u3kftg
u3ir6q
u3grqq
u3ghal
u3gei4
u3gcsl
u3ga5g
u3g3d6
u3bdkp
u39z7g

netlify.toml (deleted, -10 lines)

@ -1,10 +0,0 @@
[[headers]]
  for = "/*"
  [headers.values]
    Access-Control-Allow-Origin = "*"

[[headers]]
  for = "/_img/place/*.png"
  [headers.values]
    # 7 days
    cache-control = "public, max-age=604800"


tools/calculate_center.py (new file, +192 lines)

@ -0,0 +1,192 @@
"""
From https://github.com/Twista/python-polylabel/,
which is in turn implemented from https://github.com/mapbox/polylabel
"""
from math import sqrt, log10
import time
from typing import Tuple, List
# Python3
from queue import PriorityQueue
from math import inf
Point = Tuple[float, float]
Polygon = List[Point]
SQRT2 = sqrt(2)
def _point_to_polygon_distance(x: float, y: float, polygon: Polygon) -> float:
inside: bool = False
min_distance_squared: float = inf
previous: Point = polygon[-1]
for current in polygon:
if ((current[1] > y) != (previous[1] > y) and
(x < (previous[0] - current[0]) * (y - current[1]) / (previous[1] - current[1]) + current[0])):
inside = not inside
min_distance_squared = min(min_distance_squared, _get_segment_distance_squared(x, y, current, previous))
previous = current
result: float = sqrt(min_distance_squared)
if not inside:
return -result
return result
def _get_segment_distance_squared(px: float, py: float, point_a: Point, point_b: Point) -> float:
x: float = point_a[0]
y: float = point_a[1]
dx: float = point_b[0] - x
dy: float = point_b[1] - y
if dx != 0 or dy != 0:
t = ((px - x) * dx + (py - y) * dy) / (dx * dx + dy * dy)
if t > 1:
x = point_b[0]
y = point_b[1]
elif t > 0:
x += dx * t
y += dy * t
dx = px - x
dy = py - y
return dx * dx + dy * dy
class Cell(object):
def __init__(self, x: float, y: float, h: float, polygon: Polygon, centroid: Point):
self.h: float = h
self.y: float = y
self.x: float = x
min_dist = _point_to_polygon_distance(x, y, polygon)
self.min_dist: float = min_dist
self.center_dist: float = (centroid[0] - x) ** 2 + (centroid[1] - y) ** 2
self.max = self.min_dist + self.h * SQRT2
self.weight = -self.center_dist - self.max
def __lt__(self, other):
return self.max < other.max
def __le__(self, other):
    return self.max <= other.max
def __gt__(self, other):
    return self.max > other.max
def __ge__(self, other):
    return self.max >= other.max
def __eq__(self, other):
return self.max == other.max
def _get_centroid(polygon: Polygon) -> Point:
area: float = 0
x: float = 0
y: float = 0
previous: Point = polygon[-1]
for current in polygon:
f: float = current[0] * previous[1] - previous[0] * current[1]
x += (current[0] + previous[0]) * f
y += (current[1] + previous[1]) * f
area += f * 3
previous = current
if area == 0:
return (polygon[0][0], polygon[0][1])
return (x / area, y / area)
def _get_centroid_cell(polygon: Polygon, centroid: Point) -> Cell:
return Cell(centroid[0], centroid[1], 0, polygon, centroid)
def polylabel(polygon: Polygon, precision: float=0.5, debug: bool=False):
# find bounding box
first_item: Point = polygon[0]
min_x: float = first_item[0]
min_y: float = first_item[1]
max_x: float = first_item[0]
max_y: float = first_item[1]
for p in polygon:
if p[0] < min_x:
min_x = p[0]
if p[1] < min_y:
min_y = p[1]
if p[0] > max_x:
max_x = p[0]
if p[1] > max_y:
max_y = p[1]
width: float = max_x - min_x
height: float = max_y - min_y
cell_size: float = min(width, height)
h: float = cell_size / 2.0
cell_queue: PriorityQueue[Tuple[float, int, Cell]] = PriorityQueue()
if cell_size == 0:
return [(max_x - min_x) / 2, (max_y - min_y) / 2]
centroid: Point = _get_centroid(polygon)
# cover polygon with initial cells
x: float = min_x
while x < max_x:
y: float = min_y
while y < max_y:
c: Cell = Cell(x + h, y + h, h, polygon, centroid)
y += cell_size
cell_queue.put((c.weight, time.time(), c))
x += cell_size
best_cell: Cell = _get_centroid_cell(polygon, centroid)
bbox_cell: Cell = Cell(min_x + width / 2, min_y + height / 2, 0, polygon, centroid)
if bbox_cell.min_dist > best_cell.min_dist:
best_cell = bbox_cell
# how much closer a point is allowed to be to the border
# while still having a shorter distance to the centroid
threshold: float = log10(cell_size) / 3.0
num_of_probes = cell_queue.qsize()
while not cell_queue.empty():
_, __, cell = cell_queue.get()
# update if either the cell is further from the edge,
# or if it is sufficiently similarly far from the edge,
# but closer to the centroid
if (cell.min_dist > best_cell.min_dist
or (
cell.center_dist < best_cell.center_dist
and cell.min_dist > best_cell.min_dist - threshold
)
):
best_cell = cell
if debug:
print(f'found best {round(cell.min_dist, 4)};{round(sqrt(cell.center_dist), 4)} after {num_of_probes} probes')
if cell.max - best_cell.min_dist <= precision:
continue
h = cell.h / 2
c = Cell(cell.x - h, cell.y - h, h, polygon, centroid)
cell_queue.put((c.weight, time.time(), c))
c = Cell(cell.x + h, cell.y - h, h, polygon, centroid)
cell_queue.put((c.weight, time.time(), c))
c = Cell(cell.x - h, cell.y + h, h, polygon, centroid)
cell_queue.put((c.weight, time.time(), c))
c = Cell(cell.x + h, cell.y + h, h, polygon, centroid)
cell_queue.put((c.weight, time.time(), c))
num_of_probes += 4
if debug:
print(f'num probes: {num_of_probes}')
print(f'best distance: {best_cell.min_dist}')
return [best_cell.x, best_cell.y]
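A quick sanity check of the module above (illustrative only, not part of the commit): for an axis-aligned square, the pole of inaccessibility is simply its center.

# Illustrative usage of tools/calculate_center.py, run from the tools/ directory.
from calculate_center import polylabel

square = [(0, 0), (10, 0), (10, 10), (0, 10)]
print(polylabel(square, precision=0.5))  # [5.0, 5.0]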

tools/formatter.py

@ -1,350 +1,472 @@
#!/usr/bin/python
import re
import json
"""
Examples:
1. - /r/place
- r/place
2. /rplace
3. - https://www.reddit.com/r/place
- www.reddit.com/r/place
- reddit.com/r/place
UNUSED AND FAULTY
4. - https://place.reddit.com
- place.reddit.com
5. - [https://place.reddit.com](https://place.reddit.com)
- [place.reddit.com](https://place.reddit.com)
"""
FS_REGEX = {
"commatization": r'( *(,+ +|,+ |,+)| +)(and|&|;)( *(,+ +|,+ |,+)| +)|, *$| +',
"pattern1": r'\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
"pattern2": r'^\/*[rR](?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
"pattern3": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
"pattern1user": r'\/*(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
"pattern2user": r'^\/*(?:u|user)(?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
"pattern3user": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
# "pattern4": r'(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*',
# "pattern5": r'\[(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\]\((?:https:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\)"',
}
VALIDATE_REGEX = {
"subreddit": r'^ *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{1,20}) *(, *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{1,20}) *)*$|^$',
"website": r'^https?://[^\s/$.?#].[^\s]*$|^$'
}
CL_REGEX = r'\[(.+?)\]\((.+?)\)'
CWTS_REGEX = {
"url": r'^(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/)$',
"subreddit": r'^\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})\/?$'
}
CSTW_REGEX = {
"website": r'^https?://[^\s/$.?#].[^\s]*$',
"user": r'^\/*u\/([A-Za-z0-9][A-Za-z0-9_]{1,20})$'
}
# r/... to /r/...
SUBREDDIT_TEMPLATE = r"/r/\1"
USER_TEMPLATE = r"/u/\1"
def format_subreddit(entry: dict):
"""
Fixes the formatting of the "subreddit" value.
"""
if not "subreddit" in entry or not entry['subreddit']:
return entry
subredditLink = entry["subreddit"]
subredditLink = re.sub(FS_REGEX["commatization"], ', ', subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2user"], USER_TEMPLATE, subredditLink)
if not subredditLink:
return entry
entry["subreddit"] = subredditLink
return entry
def collapse_links(entry: dict):
if "website" in entry and entry['website']:
website = entry["website"]
if re.search(CL_REGEX, website):
match = re.search(CL_REGEX, website)
if match.group(1) == match.group(2):
website = match.group(2)
entry["website"] = website
if "subreddit" in entry and entry['subreddit']:
subreddit = entry["subreddit"]
if re.search(CL_REGEX, subreddit):
match = re.search(CL_REGEX, subreddit)
if match.group(1) == match.group(2):
subreddit = match.group(2)
entry["subreddit"] = subreddit
return entry
def remove_extras(entry: dict):
"""
Removes unnecessary extra characters and converts select characters.
"""
if "subreddit" in entry and entry["subreddit"]:
# if not entry["subreddit"].startswith('/r/'):
# entry["subreddit"] = re.sub(r'^(.*)(?=\/r\/)', r'', entry["subreddit"])
entry["subreddit"] = re.sub(r'[.,]+$', r'', entry["subreddit"])
for key in entry:
if not entry[key] or not isinstance(entry[key], str):
continue
# Leading and trailing spaces
entry[key] = entry[key].strip()
# Double characters
entry[key] = re.sub(r' {2,}(?!\n)', r' ', entry[key])
entry[key] = re.sub(r' {3,}\n', r' ', entry[key])
entry[key] = re.sub(r'\n{3,}', r'\n\n', entry[key])
entry[key] = re.sub(r'r\/{2,}', r'r\/', entry[key])
entry[key] = re.sub(r',{2,}', r',', entry[key])
# Smart quotation marks
entry[key] = re.sub(r'[\u201c\u201d]', '"', entry[key])
entry[key] = re.sub(r'[\u2018\u2019]', "'", entry[key])
# Pseudo-empty strings
if entry[key] in ["n/a", "N/A", "na", "NA", "-", "null", "none", "None"]:
entry[key] = ""
return entry
def remove_duplicate_points(entry: dict):
"""
Removes consecutive duplicate points from paths.
"""
path: list = entry['path']
previous: list = path[0]
for i in range(len(path)-1, -1, -1):
current: list = path[i]
if current == previous:
path.pop(i)
previous = current
return entry
def fix_r_caps(entry: dict):
"""
Fixes capitalization of /r/. (/R/place -> /r/place)
"""
if not "description" in entry or not entry['description']:
return entry
entry["description"] = re.sub(r'([^\w]|^)\/R\/', '\1/r/', entry["description"])
entry["description"] = re.sub(r'([^\w]|^)R\/', '\1r/', entry["description"])
return entry
def fix_no_protocol_urls(entry: dict):
"""
Fixes URLs with no protocol by adding "https://" protocol.
"""
if not "website" in entry or not entry['website']:
return entry
if not entry["website"].startswith("http"):
entry["website"] = "https://" + entry["website"]
return entry
def convert_website_to_subreddit(entry: dict):
"""
Converts the subreddit link on "website" to "subreddit" if possible.
"""
if not "website" in entry or not entry['website']:
return entry
if re.match(CWTS_REGEX["url"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["url"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
elif re.match(CWTS_REGEX["subreddit"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["subreddit"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
return entry
def convert_subreddit_to_website(entry: dict):
"""
Converts the links on "subreddit" to a "website" if needed. This also supports Reddit users (/u/reddit).
"""
if not "subreddit" in entry or not entry['subreddit']:
return entry
if re.match(CSTW_REGEX["website"], entry["subreddit"]):
if (entry["website"].lower() == entry["subreddit"].lower()):
entry["subreddit"] = ""
elif not "website" in entry or entry['website'] == "":
entry["website"] = entry["subreddit"]
entry["subreddit"] = ""
elif re.match(CSTW_REGEX["user"], entry["subreddit"]):
if not "website" in entry or entry['website'] == "":
username = re.match(CSTW_REGEX["user"], entry["subreddit"]).group(1)
entry["website"] = "https://www.reddit.com/user/" + username
entry["subreddit"] = ""
return entry
def calculate_center(path: list):
"""
Calculates the center of a polygon.
Adapted from /web/_js/draw.js:calucalteCenter()
"""
area = 0
x = 0
y = 0
for i in range(len(path)):
point1 = path[i]
point2 = path[i-1 if i != 0 else len(path)-1]
f = point1[0] * point2[1] - point2[0] * point1[1]
area += f
x += (point1[0] + point2[0]) * f
y += (point1[1] + point2[1]) * f
area *= 3
if area != 0:
return [x // area + 0.5, y // area + 0.5]
else:
# get the center of a straight line
max_x = max(i[0] for i in path)
min_x = min(i[0] for i in path)
max_y = max(i[1] for i in path)
min_y = min(i[1] for i in path)
return [(max_x + min_x) // 2 + 0.5, (max_y + min_y) // 2 + 0.5]
def update_center(entry: dict):
"""
Checks if the center of an entry is up to date, and updates it if it's either missing or outdated.
"""
if 'path' not in entry:
return entry
path = entry['path']
if len(path) > 1:
calculated_center = calculate_center(path)
if 'center' not in entry or entry['center'] != calculated_center:
entry['center'] = calculated_center
return entry
def validate(entry: dict):
"""
Validates the entry. Catches errors and prints warnings related to the entry.
Status code key:
0: All valid, no problems
1: Informational logs that may be ignored
2: Warnings that may affect user experience when interacting with the entry
3: Errors that make the entry inaccessible or broken.
"""
return_status = 0
if (not "id" in entry or (not entry['id'] and not entry['id'] == 0)):
print(f"Wait, no id here! How did this happened? {entry}")
return_status = 3
entry['id'] = '[MISSING_ID]'
if not ("path" in entry and isinstance(entry["path"], list) and len(entry["path"]) > 0):
print(f"Entry {entry['id']} has no points!")
return_status = 3
elif len(entry["path"]) < 3:
print(f"Entry {entry['id']} only has {len(entry['path'])} point(s)!")
return_status = 3
for key in entry:
if key in VALIDATE_REGEX and not re.match(VALIDATE_REGEX[key], entry[key]):
if return_status < 2: return_status = 2
print(f"{key} of entry {entry['id']} is still invalid! {entry[key]}")
return return_status
def per_line_entries(entries: list):
"""
Returns a string of all the entries, with every entry in one line.
"""
out = "[\n"
for entry in entries:
if entry:
out += json.dumps(entry, ensure_ascii=False) + ",\n"
out = out[:-2] + "\n]"
return out
def format_all(entry: dict, silent=False):
"""
Format using all the available formatters.
Outputs a tuple containing the entry and the validation status code.
Status code key:
0: All valid, no problems
1: Informational logs that may be ignored
2: Warnings that may affect user experience when interacting with the entry
3: Errors that make the entry inaccessible or broken.
"""
def print_(*args, **kwargs):
if not silent:
print(*args, **kwargs)
print_("Fixing r/ capitalization...")
entry = fix_r_caps(entry)
print_("Fix formatting of subreddit...")
entry = format_subreddit(entry)
print_("Collapsing Markdown links...")
entry = collapse_links(entry)
print_("Converting website links to subreddit (if possible)...")
entry = convert_website_to_subreddit(entry)
print_("Converting subreddit links to website (if needed)...")
entry = convert_subreddit_to_website(entry)
print_("Fixing links without protocol...")
entry = fix_no_protocol_urls(entry)
print_("Removing extras...")
entry = remove_extras(entry)
print_("Removing duplicate points...")
entry = remove_duplicate_points(entry)
print_("Updating center...")
entry = update_center(entry)
print_("Validating...")
status_code = validate(entry)
print_("Completed!")
return ( entry, status_code )
if __name__ == '__main__':
def go(path):
print(f"Formatting {path}...")
with open(path, "r+", encoding='UTF-8') as f1:
entries = json.loads(f1.read())
for i in range(len(entries)):
entry_formatted, validation_status = format_all(entries[i], True)
if validation_status > 2:
print(f"Entry {entry_formatted['id']} will be removed! {json.dumps(entry_formatted)}")
entries[i] = None
else:
entries[i] = entry_formatted
if not (i % 500):
print(f"{i} checked.")
print(f"{len(entries)} checked.")
with open(path, "w", encoding='UTF-8') as f2:
f2.write(per_line_entries(entries))
print("Writing completed. All done.")
go("../web/atlas.json")
#!/usr/bin/python
import re
import json
import math
from calculate_center import polylabel
"""
Examples:
1. - /r/place
- r/place
2. /rplace
3. - https://www.reddit.com/r/place
- www.reddit.com/r/place
- reddit.com/r/place
UNUSED AND FAULTY
4. - https://place.reddit.com
- place.reddit.com
5. - [https://place.reddit.com](https://place.reddit.com)
- [place.reddit.com](https://place.reddit.com)
"""
FS_REGEX = {
"commatization": r'( *(,+ +|,+ |,+)| +)(and|&|;)( *(,+ +|,+ |,+)| +)|, *$| +',
"pattern1": r'\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
"pattern2": r'^\/*[rR](?!\/)([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
"pattern3": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*',
"pattern1user": r'\/*(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
"pattern2user": r'^\/*(?:u|user)(?!\/)([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
"pattern3user": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*',
"pattern1new": r'(?:(?:(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com)?\/)?[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*'
# "pattern4": r'(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*',
# "pattern5": r'\[(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\]\((?:https:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\)"',
}
VALIDATE_REGEX = {
"subreddit": r'^ *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{2,20}) *(, *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{2,20}) *)*$|^$',
"website": r'^https?://[^\s/$.?#].[^\s]*$|^$'
}
CL_REGEX = r'\[(.+?)\]\((.+?)\)'
CWTS_REGEX = {
"url": r'^(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/)$',
"subreddit": r'^\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})\/?$'
}
CSTW_REGEX = {
"website": r'^https?://[^\s/$.?#].[^\s]*$',
"user": r'^\/*u\/([A-Za-z0-9][A-Za-z0-9_]{2,20})$'
}
# r/... to /r/...
SUBREDDIT_TEMPLATE = r"/r/\1"
USER_TEMPLATE = r"/u/\1"
def format_subreddit(entry: dict):
"""
Fixes the formatting of the "subreddit" value.
"""
if "subreddit" in entry and entry["subreddit"]:
subredditLink = entry["subreddit"]
subredditLink = re.sub(FS_REGEX["commatization"], ', ', subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2user"], USER_TEMPLATE, subredditLink)
entry["subreddit"] = subredditLink
if "links" in entry and "subreddit" in entry["links"]:
for i in range(len(entry["links"]["subreddit"])):
subredditLink = entry["links"]["subreddit"][i]
subredditLink = re.sub(FS_REGEX["pattern3"], r"\1", subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1new"], r"\1", subredditLink)
entry["links"]["subreddit"][i] = subredditLink
return entry
def collapse_links(entry: dict):
"""
Collapses Markdown links.
"""
if "website" in entry and entry['website']:
website = entry["website"]
if re.search(CL_REGEX, website):
match = re.search(CL_REGEX, website)
if match.group(1) == match.group(2):
website = match.group(2)
entry["website"] = website
elif "links" in entry and "website" in entry["links"]:
for i in range(len(entry["links"]["website"])):
website = entry["links"]["website"][i]
if re.search(CL_REGEX, website):
match = re.search(CL_REGEX, website)
if match.group(1) == match.group(2):
website = match.group(2)
entry["links"]["website"][i] = website
if "subreddit" in entry and entry['subreddit']:
subreddit = entry["subreddit"]
if re.search(CL_REGEX, subreddit):
match = re.search(CL_REGEX, subreddit)
if match.group(1) == match.group(2):
subreddit = match.group(2)
entry["subreddit"] = subreddit
elif "links" in entry and "subreddit" in entry["links"]:
for i in range(len(entry["links"]["subreddit"])):
subreddit = entry["links"]["subreddit"][i]
if re.search(CL_REGEX, subreddit):
match = re.search(CL_REGEX, subreddit)
if match.group(1) == match.group(2):
subreddit = match.group(2)
entry["links"]["subreddit"][i] = subreddit
return entry
def remove_extras(entry: dict):
"""
Removes unnecessary extra characters and converts select characters.
"""
if "subreddit" in entry and entry["subreddit"]:
# if not entry["subreddit"].startswith('/r/'):
# entry["subreddit"] = re.sub(r'^(.*)(?=\/r\/)', r'', entry["subreddit"])
entry["subreddit"] = re.sub(r'[.,]+$', r'', entry["subreddit"])
for key in entry:
if not entry[key] or not isinstance(entry[key], str):
continue
# Leading and trailing spaces
entry[key] = entry[key].strip()
# Double characters
entry[key] = re.sub(r' {2,}(?!\n)', r' ', entry[key])
entry[key] = re.sub(r' {3,}\n', r' ', entry[key])
entry[key] = re.sub(r'\n{3,}', r'\n\n', entry[key])
entry[key] = re.sub(r'r\/{2,}', r'r\/', entry[key])
entry[key] = re.sub(r',{2,}', r',', entry[key])
# Smart quotation marks
entry[key] = re.sub(r'[\u201c\u201d]', '"', entry[key])
entry[key] = re.sub(r'[\u2018\u2019]', "'", entry[key])
# Pseudo-empty strings
if entry[key] in ["n/a", "N/A", "na", "NA", "-", "null", "none", "None"]:
entry[key] = ""
return entry
def remove_duplicate_points(entry: dict):
"""
Removes consecutive duplicate points from paths.
"""
if not "path" in entry:
return entry
if isinstance(entry['path'], list):
path: list = entry['path']
previous: list = path[0]
for i in range(len(path)-1, -1, -1):
current: list = path[i]
if current == previous:
path.pop(i)
previous = current
else:
for key in entry['path']:
path: list = entry['path'][key]
previous: list = path[0]
for i in range(len(path)-1, -1, -1):
current: list = path[i]
if current == previous:
path.pop(i)
previous = current
return entry
def fix_r_caps(entry: dict):
"""
Fixes capitalization of /r/. (/R/place -> /r/place)
"""
if not "description" in entry or not entry['description']:
return entry
entry["description"] = re.sub(r'([^\w]|^)\/R\/', '\1/r/', entry["description"])
entry["description"] = re.sub(r'([^\w]|^)R\/', '\1r/', entry["description"])
return entry
def fix_no_protocol_urls(entry: dict):
"""
Fixes URLs with no protocol by adding "https://" protocol.
"""
if "links" in entry and "website" in entry['links']:
for i in range(len(entry["links"]["website"])):
if entry["links"]["website"][i] and not entry["links"]["website"][i].startswith("http"):
entry["links"]["website"][i] = "https://" + entry["website"]
elif "website" in entry and entry['website']:
if not entry["website"].startswith("http"):
entry["website"] = "https://" + entry["website"]
return entry
def convert_website_to_subreddit(entry: dict):
"""
Converts the subreddit link on "website" to "subreddit" if possible.
"""
if "links" in entry and "website" in entry["links"]:
for i in range(len(entry["links"]["website"])):
if re.match(CWTS_REGEX["url"], entry["links"]["website"][i]):
new_subreddit = re.sub(CWTS_REGEX["url"], r"\1", entry["links"]["website"][i])
if "subreddit" in entry["links"] and new_subreddit in entry["links"]["subreddit"]:
entry["links"]["website"][i] = ""
elif not "subreddit" in entry["links"] or len(entry["links"]["subreddit"]) == 0:
if not "subreddit" in entry["links"]:
entry["links"]["subreddit"] = []
entry["links"]["subreddit"].append(new_subreddit)
entry["links"]["website"][i] = ""
elif re.match(CWTS_REGEX["subreddit"], entry["links"]["website"][i]):
new_subreddit = re.sub(CWTS_REGEX["subreddit"], r"\1", entry["links"]["website"][i])
if "subreddit" in entry["links"] and new_subreddit in entry["links"]["subreddit"]:
entry["links"]["website"][i] = ""
elif not "subreddit" in entry["links"] or len(entry["links"]["subreddit"]) == 0:
if not "subreddit" in entry["links"]:
entry["links"]["subreddit"] = []
entry["links"]["subreddit"].append(new_subreddit)
entry["links"]["website"][i] = ""
elif "website" in entry and entry['website']:
if re.match(CWTS_REGEX["url"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["url"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
elif re.match(CWTS_REGEX["subreddit"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["subreddit"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
return entry
def convert_subreddit_to_website(entry: dict):
"""
Converts the links on "subreddit" to a "website" if needed. This also supports Reddit users (/u/reddit).
"""
if "links" in entry and "subreddit" in entry["links"]:
for i in range(len(entry["links"]["subreddit"])):
if re.match(CSTW_REGEX["website"], entry["links"]["subreddit"][i]):
if "website" in entry["links"] and entry["links"]["subreddit"][i] in entry["links"]["website"]:
entry["links"]["subreddit"][i] = ""
elif not "website" in entry["links"] or len(entry["website"]) == 0:
if not "website" in entry["links"]:
entry["links"]["website"] = []
entry["website"].append(entry["links"]["subreddit"][i])
entry["links"]["subreddit"][i] = ""
elif re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]):
if not "website" in entry["links"] or len(entry["website"]) == 0:
username = re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]).group(1)
if not "website" in entry["links"]:
entry["links"]["website"] = []
entry["website"].append("https://www.reddit.com/user/" + username)
entry["links"]["subreddit"][i] = ""
elif "subreddit" in entry and entry['subreddit']:
if re.match(CSTW_REGEX["website"], entry["subreddit"]):
if (entry["website"].lower() == entry["subreddit"].lower()):
entry["subreddit"] = ""
elif not "website" in entry or entry['website'] == "":
entry["website"] = entry["subreddit"]
entry["subreddit"] = ""
elif re.match(CSTW_REGEX["user"], entry["subreddit"]):
if not "website" in entry or entry['website'] == "":
username = re.match(CSTW_REGEX["user"], entry["subreddit"]).group(1)
entry["website"] = "https://www.reddit.com/user/" + username
entry["subreddit"] = ""
return entry
def calculate_center(path: list):
"""
Calculates the center of a polygon.
"""
result = polylabel(path)
return [math.floor(result[0]) + 0.5, math.floor(result[1]) + 0.5]
def update_center(entry: dict):
"""
Checks if the center of an entry is up to date, and updates it if it's either missing or outdated.
"""
if 'path' not in entry:
return entry
if isinstance(entry['path'], list):
path = entry['path']
if len(path) > 1:
entry['center'] = calculate_center(path)
else:
for key in entry['path']:
path = entry['path'][key]
if len(path) > 1:
entry['center'][key] = calculate_center(path)
return entry
def remove_empty_and_similar(entry: dict):
"""
Removes empty items and case-insensitive duplicates from link lists, usually left over from past formattings.
"""
if "links" in entry:
for key in entry["links"]:
    # Keep only non-empty links, dropping case-insensitive duplicates.
    seen = set()
    kept = []
    for link in entry["links"][key]:
        if link and link.lower() not in seen:
            seen.add(link.lower())
            kept.append(link)
    entry["links"][key] = kept
return entry
def validate(entry: dict):
"""
Validates the entry. Catches errors and prints warnings related to the entry.
Status code key:
0: All valid, no problems
1: Informational logs that may be ignored
2: Warnings that may affect user experience when interacting with the entry
3: Errors that make the entry inaccessible or broken.
"""
return_status = 0
if (not "id" in entry or (not entry['id'] and not entry['id'] == 0)):
print(f"Wait, no id here! How did this happened? {entry}")
return_status = 3
entry['id'] = '[MISSING_ID]'
if "path" in entry:
if isinstance(entry['path'], list):
if len(entry["path"]) == 0:
print(f"Entry {entry['id']} has no points!")
return_status = 3
elif len(entry["path"]) < 3:
print(f"Entry {entry['id']} only has {len(entry['path'])} point(s)!")
return_status = 3
else:
for key in entry['path']:
path = entry['path'][key]
if len(path) == 0:
print(f"Period {key} of entry {entry['id']} has no points!")
return_status = 3
elif len(path) < 3:
print(f"Period {key} of entry {entry['id']} only has {len(entry['path'])} point(s)!")
return_status = 3
else:
print(f"Entry {entry['id']} has no path at all!")
return_status = 3
for key in entry:
if key in VALIDATE_REGEX and not re.match(VALIDATE_REGEX[key], entry[key]):
if return_status < 2: return_status = 2
print(f"{key} of entry {entry['id']} is still invalid! {entry[key]}")
return return_status
def per_line_entries(entries: list):
"""
Returns a string of all the entries, with every entry in one line.
"""
out = "[\n"
for entry in entries:
if entry:
out += json.dumps(entry, ensure_ascii=False) + ",\n"
out = out[:-2] + "\n]"
return out
def format_all(entry: dict, silent=False):
"""
Format using all the available formatters.
Outputs a tuple containing the entry and the validation status code.
Status code key:
0: All valid, no problems
1: Informational logs that may be ignored
2: Warnings that may affect user experience when interacting with the entry
3: Errors that make the entry inaccessible or broken.
"""
def print_(*args, **kwargs):
if not silent:
print(*args, **kwargs)
print_("Fixing r/ capitalization...")
entry = fix_r_caps(entry)
print_("Fix formatting of subreddit...")
entry = format_subreddit(entry)
print_("Collapsing Markdown links...")
entry = collapse_links(entry)
print_("Converting website links to subreddit (if possible)...")
entry = convert_website_to_subreddit(entry)
print_("Converting subreddit links to website (if needed)...")
entry = convert_subreddit_to_website(entry)
print_("Fixing links without protocol...")
entry = fix_no_protocol_urls(entry)
print_("Removing extras...")
entry = remove_extras(entry)
print_("Removing duplicate points...")
entry = remove_duplicate_points(entry)
print_("Updating center...")
entry = update_center(entry)
print_("Remove empty items...")
entry = remove_empty_and_similar(entry)
print_("Validating...")
status_code = validate(entry)
print_("Completed!")
return ( entry, status_code )
if __name__ == '__main__':
def go(path):
print(f"Formatting {path}...")
with open(path, "r+", encoding='UTF-8') as f1:
entries = json.loads(f1.read())
for i in range(len(entries)):
entry_formatted, validation_status = format_all(entries[i], True)
if validation_status > 2:
print(f"Entry {entry_formatted['id']} will be removed! {json.dumps(entry_formatted)}")
entries[i] = None
else:
entries[i] = entry_formatted
if not (i % 200):
print(f"{i} checked.")
print(f"{len(entries)} checked.")
with open(path, "w", encoding='utf-8', newline='\n') as f2:
f2.write(per_line_entries(entries))
print("Writing completed. All done.")
go("../web/atlas.json")
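To see the new formatter end to end, here is an illustrative run against a hypothetical remastered-format entry (the entry and its values are made up; run from the tools/ directory):

# Hypothetical entry demonstrating tools/formatter.py's format_all().
from formatter import format_all

entry = {
    "id": "abc123",
    "name": "Example",
    "description": "A /R/place artwork.",
    "links": {
        "website": ["[https://place.reddit.com](https://place.reddit.com)"],
        "subreddit": ["https://www.reddit.com/r/placeAtlas2"],
    },
    "center": {},
    "path": {"1-166, T:0-2": [[0, 0], [10, 0], [10, 10], [0, 10], [0, 10]]},
}
entry, status = format_all(entry, silent=True)
print(status)                       # 0: nothing left invalid
print(entry["description"])         # A /r/place artwork.
print(entry["links"]["subreddit"])  # ['placeAtlas2']
print(entry["center"])              # {'1-166, T:0-2': [5.5, 5.5]}

The Markdown website link is collapsed, the subreddit URL is reduced to its bare name, the duplicated last point is dropped, and the center is recomputed via polylabel.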

tools/merge_out.py (new file, +61 lines)

@ -0,0 +1,61 @@
import praw
import json
import time
import re
import os
import traceback
from formatter import format_all, per_line_entries
out_ids = []
out_dupe_ids = []
out_edited_added_ids = []
atlas_ids = []
with open('temp_atlas.json', 'r', encoding='utf-8') as out_file:
out_json = json.loads(out_file.read())
with open('../web/atlas.json', 'r', encoding='utf-8') as atlas_file:
atlas_json = json.loads(atlas_file.read())
for entry in atlas_json:
atlas_ids.append(entry['id'])
for entry in out_json:
if (entry['id'] in out_ids):
print(f"Entry {entry['id']} has duplicates! Please resolve this conflict. This will be excluded from the merge.")
out_dupe_ids.append(entry['id'])
out_ids.append(entry['id'])
for entry in out_json:
if entry['id'] in out_dupe_ids:
continue
if 'edit' in entry and entry['edit']:
index = next((i for i, item in enumerate(atlas_json) if item["id"] == entry['id']), None)
assert index is not None, "Edit failed! ID not found on Atlas."
print(f"Edited {atlas_json[index]['id']} with {entry['edit']}")
if 'edit' in entry:
out_edited_added_ids.append(entry['edit'])
del entry['edit']
if 'submitted_by' in atlas_json[index]:
atlas_json[index]['contributors'] = [ atlas_json[index]['submitted_by'] ]
elif not 'contributors' in atlas_json[index]:
atlas_json[index]['contributors'] = []
entry['contributors'] = atlas_json[index]['contributors'] + list(set(entry['contributors']) - set(atlas_json[index]['contributors']))
atlas_json[index] = entry
else:
print(f"Added {entry['id']}.")
atlas_json.append(entry)
print('Writing...')
with open('../web/atlas.json', 'w', encoding='utf-8') as atlas_file:
atlas_file.write(per_line_entries(atlas_json))
with open('../data/read-ids.txt', 'a', encoding='utf-8') as read_ids_file:
with open('read-ids-temp.txt', 'r', encoding='utf-8') as read_ids_temp_file:
read_ids_file.writelines(read_ids_temp_file.readlines())
print('All done.')
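One detail of the contributors merge above (an illustrative note, not part of the commit): the set difference keeps existing names in order but appends new ones in arbitrary order.

# The ordered-union idiom used by merge_out.py above.
existing = ["alice", "bob"]
incoming = ["bob", "carol", "dave"]
merged = existing + list(set(incoming) - set(existing))
print(merged)  # ['alice', 'bob', ...]; 'carol' and 'dave' follow in set order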


@ -0,0 +1,105 @@
import os
import json
import re
'''
Migrator script from old atlas format to remastered atlas format.
- center and path: single -> time-specific
- website and subreddit: single strings -> links object
- submitted_by -> contributors
'''
#
# Migrates the old atlas format (single center/path) to the remastered atlas format (time-boxed centers/paths)
def per_line_entries(entries: list):
out = '[\n'
for entry in entries:
out += json.dumps(entry, ensure_ascii=False) + ',\n'
return out[:-2] + '\n]'
file_path = os.path.join('..', 'web', 'atlas.json')
END_IMAGE = 166
INIT_CANVAS_RANGE = (1, END_IMAGE)
EXPANSION_1_RANGE = (56, END_IMAGE)
EXPANSION_2_RANGE = (109, END_IMAGE)
COMMATIZATION = re.compile(r'(?: *(?:,+ +|,+ |,+)| +)(?:and|&|;)(?: *(?:,+ +|,+ |,+)| +)|, *$| +')
FS_REGEX = re.compile(r'(?:(?:(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com)?\/)?[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*')
with open(file_path, 'r+', encoding='UTF-8') as file:
entries = json.loads(file.read())
index = 0
for entry in entries:
new_entry = {
"id": "",
"name": "",
"description": "",
"links": {},
"center": {},
"path": {},
"contributors": []
}
center = entry['center']
path = entry['path']
if isinstance(center, list):
# Use the center to figure out which canvas expansion the entry is in.
if center[1] > 1000:
time_range = EXPANSION_2_RANGE
elif center[0] > 1000:
time_range = EXPANSION_1_RANGE
else:
time_range = INIT_CANVAS_RANGE
time_key = '%d-%d, T:0-2' % time_range
new_entry = {
**new_entry,
"center": {
time_key: center
},
"path": {
time_key: path
}
}
del entry['center']
del entry['path']
if "website" in entry:
if isinstance(entry["website"], str) and entry["website"]:
new_entry['links']['website'] = [entry['website']]
del entry['website']
if "subreddit" in entry:
if isinstance(entry["subreddit"], str) and entry["subreddit"]:
new_entry['links']['subreddit'] = list(map(lambda x: FS_REGEX.sub(r"\1", x), COMMATIZATION.split(entry['subreddit'])))
del entry['subreddit']
if "submitted_by" in entry:
new_entry['contributors'].append(entry['submitted_by'])
del entry['submitted_by']
entries[index] = {
**new_entry,
**entry
}
index += 1
if not (index % 1000):
print(f"{index} checked.")
print(f"{len(entries)} checked.")
print("Writing...")
with open(file_path, 'w', encoding='utf-8', newline='\n') as f2:
f2.write(per_line_entries(entries))
print("All done!")


@ -1,14 +1,40 @@
"""
Auth setup
1. Head to https://www.reddit.com/prefs/apps
2. Click "create another app"
3. Give it a name and description
4. Select "script"
5. Set the redirect URI to http://localhost:8080
6. Create file "credentials" with the format below
[ID] <- Under "personal use script"
[Secret]
[Username] <- Must be a mod, don't do this if you │
[Password] <- don't know what you are doing. │
7. Run Script
Running Script
1. Input the next ID to use
2. Manually resolve errors in manual_atlas.txt
3. Either:
   a. Use merge_out.py, or
   b. Copy temp_atlas.json entries into web/_js/atlas.js (mind the edits!),
      then copy read-ids-temp.txt IDs into data/read-ids.txt
4. Create a pull request
"""
import praw
import json
import time
import re
import os
import traceback
from formatter import format_all
outfile = open('temp_atlas.json', 'w', encoding='utf-8')
editidsfile = open('read-ids-temp.txt', 'w')
failfile = open('manual_atlas.json', 'w', encoding='utf-8')
OUT_FILE = open('temp_atlas.json', 'w', encoding='utf-8')
READ_IDS_FILE = open('read-ids-temp.txt', 'w')
FAIL_FILE = open('manual_atlas.txt', 'w', encoding='utf-8')
OUT_FILE_LINES = ['[\n', ']\n']
with open('credentials', 'r') as file:
credentials = file.readlines()
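For reference, the four credential lines read above map onto a PRAW client roughly like this (a minimal sketch; the variable names are illustrative, not the script's):

# Minimal sketch: build a PRAW client from the 4-line credentials file.
import praw

with open('credentials', 'r') as file:
    client_id, client_secret, username, password = \
        [line.strip() for line in file.readlines()][:4]

reddit = praw.Reddit(
    client_id=client_id,
    client_secret=client_secret,
    username=username,
    password=password,
    user_agent="Atlas entry scraper (illustrative)",
)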
@ -32,10 +58,21 @@
existing_ids = []
with open('../data/edit-ids.txt', 'r') as edit_ids_file:
with open('../data/read-ids.txt', 'r') as edit_ids_file:
for id in [x.strip() for x in edit_ids_file.readlines()]:
existing_ids.append(id)
# with open('../web/atlas.json', 'r') as atlas_file:
# atlas_raw: list = json.loads(atlas_file)
# atlas = {}
# atlas_ids = []
# for index in atlas_raw:
# entry = atlas_raw[index]
# id = entry['id']
# del entry['id']
# atlas[id] = entry
# atlas_ids.append(id)
def set_flair(submission, flair):
if has_write_access and submission.link_flair_text != flair:
flair_choices = submission.flair.choices()
@ -48,31 +85,7 @@ def set_flair(submission, flair):
successcount = 0
totalcount = 0
outfile.write("[\n")
for submission in reddit.subreddit('placeAtlas2').new(limit=2000):
"""
Auth setup
1. Head to https://www.reddit.com/prefs/apps
2. Click "create another app"
3. Give it a name and description
4. Select "script"
5. Redirect to http://localhost:8080
6. Create file "credentials" with the format below.
[ID] <- Under "personal use script"
[Secret]
[Username] <- Must be a mod, don't do this if you │
[Password] <- don't know what you are doing. │
7. Run Script
Running Script
1. Input the next ID to use
2. Manually resolve errors in manual_atlas.json
3. Copy temp_atlas.json entries into web/_js/atlas.js
4. Pull Request
"""
total_all_flairs += 1
if (submission.id in existing_ids):
@ -83,8 +96,8 @@ def set_flair(submission, flair):
break
else:
continue
if (submission.link_flair_text == "New Entry"):
if submission.link_flair_text == "New Entry" or submission.link_flair_text == "Edit Entry":
try:
@ -92,7 +105,7 @@ def set_flair(submission, flair):
rawtext = text
text = text.replace('\u200c', '')
text = re.compile(r".*(\{.+\}).*", re.DOTALL).search(text).group(1)
text = re.compile(r"(\{.+\})", re.DOTALL).search(text).group(0)
# Test if it needs to escape the escape character. Usually happens on fancy mode.
try: json.loads(text)
except json.JSONDecodeError: text = re.sub(r"\\(.)", r"\1", text)
@ -101,11 +114,32 @@ def set_flair(submission, flair):
if submission_json:
submission_json_dummy = {"id": submission.id, "submitted_by": ""}
try:
submission_json_dummy["submitted_by"] = submission.author.name
except AttributeError:
submission_json_dummy["submitted_by"] = "unknown"
if submission.link_flair_text == "Edit Entry":
assert submission_json["id"] != 0, "Edit invalid because ID is tampered, it must not be 0!"
submission_json_dummy = {"id": submission_json["id"], "edit": submission.id}
submission_json["contributors"] = []
try:
if submission.author.name not in submission_json["contributors"]:
submission_json["contributors"].append(submission.author.name)
except AttributeError:
pass
else:
assert submission_json["id"] == 0, "Edit invalid because ID is tampered, it must be 0!"
submission_json_dummy = {"id": submission.id}
submission_json["contributors"] = []
try:
if submission.author.name not in submission_json["contributors"]:
submission_json["contributors"].append(submission.author.name)
except AttributeError:
pass
for key in submission_json:
if not key in submission_json_dummy:
submission_json_dummy[key] = submission_json[key]
@ -113,15 +147,16 @@ def set_flair(submission, flair):
assert validation_status < 3, \
"Submission invalid after validation. This may be caused by not enough points on the path."
outfile.write(json.dumps(submission_json, ensure_ascii=False) + ",\n")
editidsfile.write(submission.id + '\n')
if len(OUT_FILE_LINES) > 2:
    OUT_FILE_LINES[len(OUT_FILE_LINES) - 2] = OUT_FILE_LINES[len(OUT_FILE_LINES) - 2].replace('\n', ',\n')
OUT_FILE_LINES.insert(len(OUT_FILE_LINES) - 1, json.dumps(submission_json, ensure_ascii=False) + '\n')
READ_IDS_FILE.write(submission.id + '\n')
successcount += 1
set_flair(submission, "Processed Entry")
except Exception as e:
failfile.write(
"\n\n" + "="*40 + "\n\n" +
FAIL_FILE.write(
"\n\n" + "="*40 + "\n\nSubmission ID: " +
submission.id + "\n\n" +
traceback.format_exc() + "\n\n" +
"==== RAW ====" + "\n\n" +
@ -132,13 +167,9 @@ def set_flair(submission, flair):
failcount += 1
set_flair(submission, "Rejected Entry")
print("Wrote "+submission.id+", submitted "+str(round(time.time()-submission.created_utc))+" seconds ago")
print("Wrote " + submission.id + ", submitted " + str(round(time.time()-submission.created_utc)) + " seconds ago")
totalcount += 1
# Remove last trailing comma
outfile.seek(outfile.tell()-3, os.SEEK_SET)
outfile.truncate()
OUT_FILE.writelines(OUT_FILE_LINES)
outfile.write("\n]")
print(f"\n\nTotal all flairs:{total_all_flairs}\nSuccess: {successcount}/{totalcount}\nFail: {failcount}/{totalcount}\nPlease check manual_atlas.txt for failed entries to manually resolve.")
print(f"\n\nTotal all flairs: {total_all_flairs}\nSuccess: {successcount}/{totalcount}\nFail: {failcount}/{totalcount}\nPlease check manual_atlas.txt for failed entries to manually resolve.")

tools/unused/area-chart.png (new binary file, 42 KiB)


tools/combine.sh → tools/unused/combine.sh (moved; executable → normal file; no content changes)


@ -365,4 +365,4 @@ body:not([data-dev]) .show-only-on-dev {
.nav-link.active {
font-weight: 700;
}
}

web/_headers (new file, +11 lines)

@ -0,0 +1,11 @@
/*
  Access-Control-Allow-Origin: *

/_img/place/*.png
  cache-control: public, max-age=604800

/_img/canvas/*/*.png
  cache-control: public, max-age=604800

/_img/canvas/*.png
  cache-control: public, max-age=604800

(About seventy more binary image files, ranging from 25 KiB to 566 KiB each, were added or updated in this commit; contents not shown.)