Merge remote-tracking branch 'upstream/remaster' into interface
24
.gitattributes
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
* text=auto
|
||||
|
||||
*.bash text eol=lf
|
||||
*.bat text eol=crlf
|
||||
*.cmd text eol=crlf
|
||||
*.css text diff=css
|
||||
*.html text diff=html
|
||||
*.js text
|
||||
*.json text eol=lf
|
||||
*.py text diff=python
|
||||
*.sh text eol=lf
|
||||
|
||||
*.ico binary
|
||||
*.jpg binary
|
||||
*.jpeg binary
|
||||
*.png binary
|
||||
*.svg text
|
||||
*.webp binary
|
||||
|
||||
*.ttf binary
|
||||
*.eot binary
|
||||
*.otf binary
|
||||
*.woff binary
|
||||
*.woff2 binary
|
1
.github/CODEOWNERS
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
web/atlas.json @placeAtlas/archival-team
|
2
_headers
|
@ -1,2 +0,0 @@
|
|||
/*
|
||||
Access-Control-Allow-Origin: *
|
|
@ -8674,3 +8674,180 @@ u1j2if
|
|||
u1in4u
|
||||
u1icud
|
||||
u2fp8n
|
||||
u314dc
|
||||
u2ziyc
|
||||
u2zhof
|
||||
u2zeqw
|
||||
u2zbxw
|
||||
u2zau5
|
||||
u2z9u9
|
||||
u2z8vq
|
||||
u2z7yd
|
||||
u2z5w0
|
||||
u2z3h7
|
||||
u2z1fk
|
||||
u2z0i7
|
||||
u2yyuq
|
||||
u2yj7i
|
||||
u2y7tn
|
||||
u2wno2
|
||||
u2w68c
|
||||
u2w5d9
|
||||
u2vtk6
|
||||
u2vsc5
|
||||
u2vqdl
|
||||
u2veh4
|
||||
u2vccn
|
||||
u2upjt
|
||||
u2teqf
|
||||
u2ta3t
|
||||
u2scps
|
||||
u2s6ko
|
||||
u2s2hq
|
||||
u2rp04
|
||||
u2oe4z
|
||||
u2nm4h
|
||||
u2mglu
|
||||
u2mes6
|
||||
u2kgt0
|
||||
u2kgkx
|
||||
u2kgam
|
||||
u2jx5f
|
||||
u2jwm8
|
||||
u2jw9y
|
||||
u2jv33
|
||||
u2jpqe
|
||||
u2j9e2
|
||||
u2j34d
|
||||
u2isv1
|
||||
u2iljx
|
||||
u2ikti
|
||||
u2id8k
|
||||
u2iar2
|
||||
u2iaaj
|
||||
u2i8dt
|
||||
u2i77s
|
||||
u2i6pj
|
||||
u2i52l
|
||||
u2i4c8
|
||||
u2i3n3
|
||||
u2i2s5
|
||||
u2i2ay
|
||||
u2i18l
|
||||
u2hx8q
|
||||
u2hw35
|
||||
u2hq4n
|
||||
u2hoxw
|
||||
u2hndh
|
||||
u2hmv9
|
||||
u2hjn8
|
||||
u2hfb1
|
||||
u2heem
|
||||
u2h3d5
|
||||
u2h2uj
|
||||
u2h2ba
|
||||
u2gxgd
|
||||
u2gmz0
|
||||
u2gkze
|
||||
u2gjfb
|
||||
u2ggwy
|
||||
u2gdu6
|
||||
u2g3ho
|
||||
u39w0p
|
||||
u3956r
|
||||
u38yox
|
||||
u38y8w
|
||||
u38xtp
|
||||
u38x97
|
||||
u38wn0
|
||||
u38w7l
|
||||
u38siz
|
||||
u38rko
|
||||
u38glf
|
||||
u38fnb
|
||||
u38eza
|
||||
u38b71
|
||||
u38ang
|
||||
u38a1v
|
||||
u389gh
|
||||
u388kv
|
||||
u385vc
|
||||
u3856f
|
||||
u384h1
|
||||
u383k2
|
||||
u382nl
|
||||
u381h1
|
||||
u37bs2
|
||||
u37bal
|
||||
u37b2x
|
||||
u37abx
|
||||
u37196
|
||||
u3703q
|
||||
u36yic
|
||||
u36w1e
|
||||
u36k9o
|
||||
u360ch
|
||||
u35z7z
|
||||
u35y99
|
||||
u352sv
|
||||
u35255
|
||||
u3505v
|
||||
u34wnr
|
||||
u343uv
|
||||
u342ys
|
||||
u3426y
|
||||
u341c5
|
||||
u33yqz
|
||||
u33y72
|
||||
u33tjn
|
||||
u33syf
|
||||
u33n2r
|
||||
u40u61
|
||||
u40diy
|
||||
u402pe
|
||||
u3zfyg
|
||||
u3zbgo
|
||||
u3z8s5
|
||||
u3z0ym
|
||||
u3yzud
|
||||
u3yvxk
|
||||
u3yuzv
|
||||
u3ytml
|
||||
u3ysji
|
||||
u3yepe
|
||||
u3xupi
|
||||
u3xn44
|
||||
u3xlz8
|
||||
u3wnsl
|
||||
u3wmdt
|
||||
u3wlmr
|
||||
u3wkqe
|
||||
u3wigu
|
||||
u3wf0o
|
||||
u3wcn9
|
||||
u3wbpf
|
||||
u3umji
|
||||
u3uftd
|
||||
u3ueag
|
||||
u3udwc
|
||||
u3u038
|
||||
u3sxkx
|
||||
u3sq78
|
||||
u3qf4m
|
||||
u3pe7k
|
||||
u3pc5u
|
||||
u3o3ls
|
||||
u3nhfo
|
||||
u3lnul
|
||||
u3llih
|
||||
u3kmx3
|
||||
u3kftg
|
||||
u3ir6q
|
||||
u3grqq
|
||||
u3ghal
|
||||
u3gei4
|
||||
u3gcsl
|
||||
u3ga5g
|
||||
u3g3d6
|
||||
u3bdkp
|
||||
u39z7g
|
||||
|
|
10
netlify.toml
|
@ -1,10 +0,0 @@
|
|||
[[headers]]
|
||||
for = "/*"
|
||||
[headers.values]
|
||||
Access-Control-Allow-Origin = "*"
|
||||
|
||||
[[headers]]
|
||||
for = "/_img/place/*.png"
|
||||
[headers.values]
|
||||
# 28 days
|
||||
cache-control = "public, max-age=604800"
|
Before Width: | Height: | Size: 118 KiB |
192
tools/calculate_center.py
Normal file
|
@ -0,0 +1,192 @@
|
|||
"""
|
||||
From https://github.com/Twista/python-polylabel/,
|
||||
which is in turn implemented from https://github.com/mapbox/polylabel
|
||||
"""
|
||||
from math import sqrt, log10
|
||||
import time
|
||||
from typing import Tuple, List
|
||||
|
||||
# Python3
|
||||
from queue import PriorityQueue
|
||||
from math import inf
|
||||
|
||||
Point = Tuple[float, float]
|
||||
Polygon = List[Point]
|
||||
|
||||
SQRT2 = sqrt(2)
|
||||
|
||||
|
||||
def _point_to_polygon_distance(x: float, y: float, polygon) -> float:
    """Signed distance from (x, y) to the polygon outline.

    Positive when the point lies inside the polygon, negative when it lies
    outside; the magnitude is the distance to the nearest edge.
    """
    inside = False
    best_sq = inf

    prev = polygon[-1]
    for cur in polygon:
        # Ray-casting parity test: toggle `inside` for every edge crossed
        # by a horizontal ray extending left from the point.
        crosses_y = (cur[1] > y) != (prev[1] > y)
        if crosses_y and x < (prev[0] - cur[0]) * (y - cur[1]) / (prev[1] - cur[1]) + cur[0]:
            inside = not inside

        # Track the closest edge while we walk the outline.
        best_sq = min(best_sq, _get_segment_distance_squared(x, y, cur, prev))
        prev = cur

    dist = sqrt(best_sq)
    return dist if inside else -dist
|
||||
|
||||
|
||||
def _get_segment_distance_squared(px: float, py: float, point_a: Point, point_b: Point) -> float:
|
||||
x: float = point_a[0]
|
||||
y: float = point_a[1]
|
||||
dx: float = point_b[0] - x
|
||||
dy: float = point_b[1] - y
|
||||
|
||||
if dx != 0 or dy != 0:
|
||||
t = ((px - x) * dx + (py - y) * dy) / (dx * dx + dy * dy)
|
||||
|
||||
if t > 1:
|
||||
x = point_b[0]
|
||||
y = point_b[1]
|
||||
|
||||
elif t > 0:
|
||||
x += dx * t
|
||||
y += dy * t
|
||||
|
||||
dx = px - x
|
||||
dy = py - y
|
||||
|
||||
return dx * dx + dy * dy
|
||||
|
||||
|
||||
class Cell(object):
    """A square probe cell used by the polylabel search.

    Cells compare by `max` — an upper bound on the distance-to-edge
    achievable anywhere inside the cell — so they can be ordered in a
    priority queue.
    """

    def __init__(self, x: float, y: float, h: float, polygon, centroid):
        # (x, y) is the cell centre; h is the half-size of the square.
        self.h: float = h
        self.y: float = y
        self.x: float = x
        min_dist = _point_to_polygon_distance(x, y, polygon)
        # Signed distance from the cell centre to the polygon outline.
        self.min_dist: float = min_dist
        # Squared distance from the cell centre to the polygon centroid.
        self.center_dist: float = (centroid[0] - x) ** 2 + (centroid[1] - y) ** 2
        # Upper bound of min_dist over the whole cell: centre value plus
        # the half-diagonal of the square.
        self.max = self.min_dist + self.h * SQRT2
        # Priority-queue weight: favour large `max` and small centroid distance.
        self.weight = -self.center_dist - self.max

    def __lt__(self, other):
        return self.max < other.max

    # BUG FIX: the original defined __lte__/__gte__, which are not real
    # Python comparison hooks (the data model uses __le__/__ge__), so
    # `<=`/`>=` never reached them.  Define the proper names and keep the
    # old ones as aliases for any caller that invoked them directly.
    def __le__(self, other):
        return self.max <= other.max

    def __gt__(self, other):
        return self.max > other.max

    def __ge__(self, other):
        return self.max >= other.max

    def __eq__(self, other):
        return self.max == other.max

    __lte__ = __le__
    __gte__ = __ge__
|
||||
|
||||
|
||||
def _get_centroid(polygon: Polygon) -> Point:
|
||||
area: float = 0
|
||||
x: float = 0
|
||||
y: float = 0
|
||||
previous: Point = polygon[-1]
|
||||
for current in polygon:
|
||||
f: float = current[0] * previous[1] - previous[0] * current[1]
|
||||
x += (current[0] + previous[0]) * f
|
||||
y += (current[1] + previous[1]) * f
|
||||
area += f * 3
|
||||
previous =current
|
||||
if area == 0:
|
||||
return (polygon[0][0], polygon[0][1])
|
||||
return (x / area, y / area)
|
||||
|
||||
|
||||
def _get_centroid_cell(polygon, centroid):
    """Wrap the polygon centroid in a zero-size Cell for comparisons."""
    cx, cy = centroid
    return Cell(cx, cy, 0, polygon, centroid)
|
||||
|
||||
|
||||
def polylabel(polygon, precision: float = 0.5, debug: bool = False):
    """Find the polygon's "pole of inaccessibility".

    Returns [x, y] of the interior point that (approximately) maximises the
    distance to the polygon outline, biased towards points near the
    centroid.  `precision` bounds how far the result may be from the true
    optimum.
    """
    # --- bounding box ---
    first_item = polygon[0]
    min_x = first_item[0]
    min_y = first_item[1]
    max_x = first_item[0]
    max_y = first_item[1]
    for p in polygon:
        if p[0] < min_x:
            min_x = p[0]
        if p[1] < min_y:
            min_y = p[1]
        if p[0] > max_x:
            max_x = p[0]
        if p[1] > max_y:
            max_y = p[1]

    width = max_x - min_x
    height = max_y - min_y
    cell_size = min(width, height)
    h = cell_size / 2.0

    # Queue entries are (weight, timestamp, cell); the timestamp breaks
    # weight ties before Cell comparison is consulted.
    cell_queue: PriorityQueue = PriorityQueue()

    if cell_size == 0:
        # BUG FIX: degenerate (zero-width or zero-height) polygon.  The
        # original returned [(max_x - min_x) / 2, (max_y - min_y) / 2] —
        # half the bbox *size* — which is only correct when the bbox
        # starts at the origin.  Return the bbox centre instead.
        return [(max_x + min_x) / 2, (max_y + min_y) / 2]

    centroid = _get_centroid(polygon)

    # --- cover the polygon with the initial grid of cells ---
    x = min_x
    while x < max_x:
        y = min_y
        while y < max_y:
            c = Cell(x + h, y + h, h, polygon, centroid)
            y += cell_size
            cell_queue.put((c.weight, time.time(), c))
        x += cell_size

    # Seed the best guess with the centroid cell and the bbox centre.
    best_cell = _get_centroid_cell(polygon, centroid)

    bbox_cell = Cell(min_x + width / 2, min_y + height / 2, 0, polygon, centroid)
    if bbox_cell.min_dist > best_cell.min_dist:
        best_cell = bbox_cell

    # How much closer a point may be to the border while still winning
    # by being closer to the centroid.
    threshold = log10(cell_size) / 3.0

    num_of_probes = cell_queue.qsize()
    while not cell_queue.empty():
        _, __, cell = cell_queue.get()

        # Update if either the cell is further from the edge, or it is
        # sufficiently similarly far from the edge but closer to the centroid.
        if (cell.min_dist > best_cell.min_dist
            or (
                cell.center_dist < best_cell.center_dist
                and cell.min_dist > best_cell.min_dist - threshold
            )
        ):
            best_cell = cell

            if debug:
                print(f'found best {round(cell.min_dist, 4)};{round(sqrt(cell.center_dist), 4)} after {num_of_probes} probes')

        # Prune: nothing inside this cell can beat the best by more than
        # the requested precision.
        if cell.max - best_cell.min_dist <= precision:
            continue

        # Otherwise split the cell into four quadrants and keep searching.
        h = cell.h / 2
        for dx, dy in ((-h, -h), (h, -h), (-h, h), (h, h)):
            c = Cell(cell.x + dx, cell.y + dy, h, polygon, centroid)
            cell_queue.put((c.weight, time.time(), c))
        num_of_probes += 4

    if debug:
        print(f'num probes: {num_of_probes}')
        print(f'best distance: {best_cell.min_dist}')
    return [best_cell.x, best_cell.y]
|
|
@ -2,6 +2,9 @@
|
|||
|
||||
import re
|
||||
import json
|
||||
import math
|
||||
|
||||
from calculate_center import polylabel
|
||||
|
||||
"""
|
||||
Examples:
|
||||
|
@ -19,29 +22,30 @@
|
|||
"""
|
||||
FS_REGEX = {
|
||||
"commatization": r'( *(,+ +|,+ |,+)| +)(and|&|;)( *(,+ +|,+ |,+)| +)|, *$| +',
|
||||
"pattern1": r'\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
|
||||
"pattern2": r'^\/*[rR](?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
|
||||
"pattern3": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
|
||||
"pattern1user": r'\/*(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
|
||||
"pattern2user": r'^\/*(?:u|user)(?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
|
||||
"pattern3user": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
|
||||
"pattern1": r'\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
|
||||
"pattern2": r'^\/*[rR](?!\/)([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
|
||||
"pattern3": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*',
|
||||
"pattern1user": r'\/*(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
|
||||
"pattern2user": r'^\/*(?:u|user)(?!\/)([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/$)?',
|
||||
"pattern3user": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*',
|
||||
"pattern1new": r'(?:(?:(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com)?\/)?[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*'
|
||||
# "pattern4": r'(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*',
|
||||
# "pattern5": r'\[(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\]\((?:https:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\)"',
|
||||
}
|
||||
|
||||
VALIDATE_REGEX = {
|
||||
"subreddit": r'^ *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{1,20}) *(, *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{1,20}) *)*$|^$',
|
||||
"subreddit": r'^ *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{2,20}) *(, *\/?r\/([A-Za-z0-9][A-Za-z0-9_]{2,20}) *)*$|^$',
|
||||
"website": r'^https?://[^\s/$.?#].[^\s]*$|^$'
|
||||
}
|
||||
|
||||
CL_REGEX = r'\[(.+?)\]\((.+?)\)'
|
||||
CWTS_REGEX = {
|
||||
"url": r'^(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/)$',
|
||||
"subreddit": r'^\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})\/?$'
|
||||
"url": r'^(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/r\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/)$',
|
||||
"subreddit": r'^\/*[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})\/?$'
|
||||
}
|
||||
CSTW_REGEX = {
|
||||
"website": r'^https?://[^\s/$.?#].[^\s]*$',
|
||||
"user": r'^\/*u\/([A-Za-z0-9][A-Za-z0-9_]{1,20})$'
|
||||
"user": r'^\/*u\/([A-Za-z0-9][A-Za-z0-9_]{2,20})$'
|
||||
}
|
||||
|
||||
# r/... to /r/...
|
||||
|
@ -52,10 +56,11 @@ def format_subreddit(entry: dict):
|
|||
"""
|
||||
Fix formatting of the value on "subreddit".
|
||||
"""
|
||||
if not "subreddit" in entry or not entry['subreddit']:
|
||||
return entry
|
||||
|
||||
if "subreddit" in entry and entry["subreddit"]:
|
||||
|
||||
subredditLink = entry["subreddit"]
|
||||
|
||||
subredditLink = re.sub(FS_REGEX["commatization"], ', ', subredditLink)
|
||||
subredditLink = re.sub(FS_REGEX["pattern3"], SUBREDDIT_TEMPLATE, subredditLink)
|
||||
subredditLink = re.sub(FS_REGEX["pattern1"], SUBREDDIT_TEMPLATE, subredditLink)
|
||||
|
@ -64,15 +69,30 @@ def format_subreddit(entry: dict):
|
|||
subredditLink = re.sub(FS_REGEX["pattern1user"], USER_TEMPLATE, subredditLink)
|
||||
subredditLink = re.sub(FS_REGEX["pattern2user"], USER_TEMPLATE, subredditLink)
|
||||
|
||||
if not subredditLink:
|
||||
return entry
|
||||
|
||||
entry["subreddit"] = subredditLink
|
||||
|
||||
if "links" in entry and "subreddit" in entry["links"]:
|
||||
|
||||
for i in range(len(entry["links"]["subreddit"])):
|
||||
|
||||
subredditLink = entry["links"]["subreddit"][i]
|
||||
|
||||
subredditLink = re.sub(FS_REGEX["pattern3"], r"\1", subredditLink)
|
||||
subredditLink = re.sub(FS_REGEX["pattern1new"], r"\1", subredditLink)
|
||||
|
||||
entry["links"]["subreddit"][i] = subredditLink
|
||||
|
||||
return entry
|
||||
|
||||
def collapse_links(entry: dict):
|
||||
"""
|
||||
Collapses Markdown links.
|
||||
"""
|
||||
|
||||
if "website" in entry and entry['website']:
|
||||
website = entry["website"];
|
||||
|
||||
website = entry["website"]
|
||||
|
||||
if re.search(CL_REGEX, website):
|
||||
match = re.search(CL_REGEX, website)
|
||||
if match.group(1) == match.group(2):
|
||||
|
@ -80,8 +100,23 @@ def collapse_links(entry: dict):
|
|||
|
||||
entry["website"] = website
|
||||
|
||||
elif "links" in entry and "website" in entry["links"]:
|
||||
|
||||
for i in range(len(entry["links"]["website"])):
|
||||
|
||||
website = entry["links"]["website"][i]
|
||||
|
||||
if re.search(CL_REGEX, website):
|
||||
match = re.search(CL_REGEX, website)
|
||||
if match.group(1) == match.group(2):
|
||||
website = match.group(2)
|
||||
|
||||
entry["links"]["website"][i] = website
|
||||
|
||||
if "subreddit" in entry and entry['subreddit']:
|
||||
subreddit = entry["subreddit"];
|
||||
|
||||
subreddit = entry["subreddit"]
|
||||
|
||||
if re.search(CL_REGEX, subreddit):
|
||||
match = re.search(CL_REGEX, subreddit)
|
||||
if match.group(1) == match.group(2):
|
||||
|
@ -89,12 +124,27 @@ def collapse_links(entry: dict):
|
|||
|
||||
entry["subreddit"] = subreddit
|
||||
|
||||
elif "links" in entry and "subreddit" in entry["links"]:
|
||||
|
||||
for i in range(len(entry["links"]["subreddit"])):
|
||||
|
||||
subreddit = entry["links"]["subreddit"][i]
|
||||
|
||||
if re.search(CL_REGEX, subreddit):
|
||||
match = re.search(CL_REGEX, subreddit)
|
||||
if match.group(1) == match.group(2):
|
||||
subreddit = match.group(2)
|
||||
|
||||
entry["links"]["subreddit"][i] = subreddit
|
||||
|
||||
|
||||
return entry
|
||||
|
||||
def remove_extras(entry: dict):
|
||||
"""
|
||||
Removing unnecessary extra characters and converts select characters.
|
||||
"""
|
||||
|
||||
if "subreddit" in entry and entry["subreddit"]:
|
||||
# if not entry["subreddit"].startswith('/r/'):
|
||||
# entry["subreddit"] = re.sub(r'^(.*)(?=\/r\/)', r'', entry["subreddit"])
|
||||
|
@ -124,6 +174,11 @@ def remove_duplicate_points(entry: dict):
|
|||
"""
|
||||
Removes points from paths that occur twice after each other
|
||||
"""
|
||||
|
||||
if not "path" in entry:
|
||||
return entry
|
||||
|
||||
if isinstance(entry['path'], list):
|
||||
path: list = entry['path']
|
||||
previous: list = path[0]
|
||||
for i in range(len(path)-1, -1, -1):
|
||||
|
@ -131,6 +186,15 @@ def remove_duplicate_points(entry: dict):
|
|||
if current == previous:
|
||||
path.pop(i)
|
||||
previous = current
|
||||
else:
|
||||
for key in entry['path']:
|
||||
path: list = entry['path'][key]
|
||||
previous: list = path[0]
|
||||
for i in range(len(path)-1, -1, -1):
|
||||
current: list = path[i]
|
||||
if current == previous:
|
||||
path.pop(i)
|
||||
previous = current
|
||||
|
||||
return entry
|
||||
|
||||
|
@ -138,6 +202,7 @@ def fix_r_caps(entry: dict):
|
|||
"""
|
||||
Fixes capitalization of /r/. (/R/place -> /r/place)
|
||||
"""
|
||||
|
||||
if not "description" in entry or not entry['description']:
|
||||
return entry
|
||||
|
||||
|
@ -150,9 +215,12 @@ def fix_no_protocol_urls(entry: dict):
|
|||
"""
|
||||
Fixes URLs with no protocol by adding "https://" protocol.
|
||||
"""
|
||||
if not "website" in entry or not entry['website']:
|
||||
return entry
|
||||
|
||||
if "links" in entry and "website" in entry['links']:
|
||||
for i in range(len(entry["links"]["website"])):
|
||||
if entry["links"]["website"][i] and not entry["links"]["website"][i].startswith("http"):
|
||||
entry["links"]["website"][i] = "https://" + entry["website"]
|
||||
elif "website" in entry and entry['website']:
|
||||
if not entry["website"].startswith("http"):
|
||||
entry["website"] = "https://" + entry["website"]
|
||||
|
||||
|
@ -162,9 +230,29 @@ def convert_website_to_subreddit(entry: dict):
|
|||
"""
|
||||
Converts the subreddit link on "website" to "subreddit" if possible.
|
||||
"""
|
||||
if not "website" in entry or not entry['website']:
|
||||
return entry
|
||||
|
||||
if "links" in entry and "website" in entry["links"]:
|
||||
for i in range(len(entry["links"]["website"])):
|
||||
if re.match(CWTS_REGEX["url"], entry["links"]["website"][i]):
|
||||
new_subreddit = re.sub(CWTS_REGEX["url"], r"\1", entry["links"]["website"][i])
|
||||
if new_subreddit in entry["links"]["subreddit"]:
|
||||
entry["links"]["website"][i] = ""
|
||||
elif not "subreddit" in entry["links"] or len(entry["subreddit"]) == 0:
|
||||
if not "subreddit" in entry["links"]:
|
||||
entry["links"]["subreddit"] = []
|
||||
entry["links"]["subreddit"].append(new_subreddit)
|
||||
entry["links"]["website"][i] = ""
|
||||
elif re.match(CWTS_REGEX["subreddit"], entry["links"]["website"][i]):
|
||||
new_subreddit = re.sub(CWTS_REGEX["subreddit"], r"\1", entry["links"]["website"][i])
|
||||
if new_subreddit in entry["links"]["subreddit"]:
|
||||
entry["links"]["website"][i] = ""
|
||||
elif not "subreddit" in entry["links"] or len(entry["subreddit"]) == 0:
|
||||
if not "subreddit" in entry["links"]:
|
||||
entry["links"]["subreddit"] = []
|
||||
entry["links"]["subreddit"].append(new_subreddit)
|
||||
entry["links"]["website"][i] = ""
|
||||
|
||||
elif "website" in entry and entry['website']:
|
||||
if re.match(CWTS_REGEX["url"], entry["website"]):
|
||||
new_subreddit = re.sub(CWTS_REGEX["url"], SUBREDDIT_TEMPLATE, entry["website"])
|
||||
if (new_subreddit.lower() == entry["subreddit"].lower()):
|
||||
|
@ -186,9 +274,26 @@ def convert_subreddit_to_website(entry: dict):
|
|||
"""
|
||||
Converts the links on "subreddit" to a "website" if needed. This also supports Reddit users (/u/reddit).
|
||||
"""
|
||||
if not "subreddit" in entry or not entry['subreddit']:
|
||||
return entry
|
||||
|
||||
if "links" in entry and "subreddit" in entry["links"]:
|
||||
for i in range(len(entry["links"]["subreddit"])):
|
||||
if re.match(CSTW_REGEX["website"], entry["links"]["subreddit"][i]):
|
||||
if "website" in entry["links"] and entry["links"]["subreddit"][i] in entry["links"]["website"]:
|
||||
entry["links"]["subreddit"][i] = ""
|
||||
elif not "website" in entry["links"] or len(entry["website"]) == 0:
|
||||
if not "website" in entry["links"]:
|
||||
entry["links"]["website"] = []
|
||||
entry["website"].append(entry["links"]["subreddit"][i])
|
||||
entry["links"]["subreddit"][i] = ""
|
||||
elif re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]):
|
||||
if not "website" in entry["links"] or len(entry["website"]) == 0:
|
||||
username = re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]).group(1)
|
||||
if not "website" in entry["links"]:
|
||||
entry["links"]["website"] = []
|
||||
entry["website"].append("https://www.reddit.com/user/" + username)
|
||||
entry["links"]["subreddit"][i] = ""
|
||||
|
||||
elif "subreddit" in entry and entry['subreddit']:
|
||||
if re.match(CSTW_REGEX["website"], entry["subreddit"]):
|
||||
if (entry["website"].lower() == entry["subreddit"].lower()):
|
||||
entry["subreddit"] = ""
|
||||
|
@ -206,46 +311,44 @@ def convert_subreddit_to_website(entry: dict):
|
|||
def calculate_center(path: list):
|
||||
"""
|
||||
Caluclates the center of a polygon
|
||||
|
||||
adapted from /web/_js/draw.js:calucalteCenter()
|
||||
"""
|
||||
area = 0
|
||||
x = 0
|
||||
y = 0
|
||||
|
||||
for i in range(len(path)):
|
||||
point1 = path[i]
|
||||
point2 = path[i-1 if i != 0 else len(path)-1]
|
||||
f = point1[0] * point2[1] - point2[0] * point1[1]
|
||||
area += f
|
||||
x += (point1[0] + point2[0]) * f
|
||||
y += (point1[1] + point2[1]) * f
|
||||
|
||||
area *= 3
|
||||
|
||||
if area != 0:
|
||||
return [x // area + 0.5, y // area + 0.5]
|
||||
else:
|
||||
# get the center of a straight line
|
||||
max_x = max(i[0] for i in path)
|
||||
min_x = min(i[0] for i in path)
|
||||
max_y = max(i[1] for i in path)
|
||||
min_y = min(i[1] for i in path)
|
||||
return [(max_x + min_x) // 2 + 0.5, (max_y + min_y) // 2 + 0.5]
|
||||
result = polylabel(path)
|
||||
return [math.floor(result[0]) + 0.5, math.floor(result[1]) + 0.5]
|
||||
|
||||
def update_center(entry: dict):
|
||||
"""
|
||||
checks if the center of a entry is up to date, and updates it if it's either missing or outdated
|
||||
checks if the center of a entry is up to date, and updates it if it's either missing or outdated.
|
||||
"""
|
||||
|
||||
if 'path' not in entry:
|
||||
return entry
|
||||
|
||||
if isinstance(entry['path'], list):
|
||||
path = entry['path']
|
||||
if len(path) > 1:
|
||||
calculated_center = calculate_center(path)
|
||||
if 'center' not in entry or entry['center'] != calculated_center:
|
||||
entry['center'] = calculated_center
|
||||
entry['center'] = calculate_center(path)
|
||||
else:
|
||||
for key in entry['path']:
|
||||
path = entry['path'][key]
|
||||
if len(path) > 1:
|
||||
entry['center'][key] = calculate_center(path)
|
||||
|
||||
return entry
|
||||
|
||||
def remove_empty_and_similar(entry: dict):
|
||||
"""
|
||||
Removes empty items on lists, usually from the past formattings.
|
||||
"""
|
||||
|
||||
if "links" in entry:
|
||||
|
||||
for key in entry["links"]:
|
||||
small = list(map(lambda x: x.lower(), entry["links"][key]))
|
||||
entry["links"][key] = [x for x in entry["links"][key] if x and x.lower() in small]
|
||||
|
||||
return entry
|
||||
|
||||
|
||||
def validate(entry: dict):
|
||||
"""
|
||||
Validates the entry. Catch errors and tell warnings related to the entry.
|
||||
|
@ -256,17 +359,34 @@ def validate(entry: dict):
|
|||
2: Warnings that may effect user experience when interacting with the entry
|
||||
3: Errors that make the entry inaccessible or broken.
|
||||
"""
|
||||
|
||||
return_status = 0
|
||||
if (not "id" in entry or (not entry['id'] and not entry['id'] == 0)):
|
||||
print(f"Wait, no id here! How did this happened? {entry}")
|
||||
return_status = 3
|
||||
entry['id'] = '[MISSING_ID]'
|
||||
if not ("path" in entry and isinstance(entry["path"], list) and len(entry["path"]) > 0):
|
||||
|
||||
if "path" in entry:
|
||||
if isinstance(entry['path'], list):
|
||||
if len(entry["path"]) == 0:
|
||||
print(f"Entry {entry['id']} has no points!")
|
||||
return_status = 3
|
||||
elif len(entry["path"]) < 3:
|
||||
print(f"Entry {entry['id']} only has {len(entry['path'])} point(s)!")
|
||||
return_status = 3
|
||||
else:
|
||||
for key in entry['path']:
|
||||
path = entry['path'][key]
|
||||
if len(path) == 0:
|
||||
print(f"Period {key} of entry {entry['id']} has no points!")
|
||||
return_status = 3
|
||||
elif len(path) < 3:
|
||||
print(f"Period {key} of entry {entry['id']} only has {len(entry['path'])} point(s)!")
|
||||
return_status = 3
|
||||
else:
|
||||
print(f"Entry {entry['id']} has no path at all!")
|
||||
return_status = 3
|
||||
|
||||
for key in entry:
|
||||
if key in VALIDATE_REGEX and not re.match(VALIDATE_REGEX[key], entry[key]):
|
||||
if return_status < 2: return_status = 2
|
||||
|
@ -316,6 +436,8 @@ def print_(*args, **kwargs):
|
|||
entry = remove_duplicate_points(entry)
|
||||
print_("Updating center...")
|
||||
entry = update_center(entry)
|
||||
print_("Remove empty items...")
|
||||
entry = remove_empty_and_similar(entry)
|
||||
print_("Validating...")
|
||||
status_code = validate(entry)
|
||||
print_("Completed!")
|
||||
|
@ -337,12 +459,12 @@ def go(path):
|
|||
entries[i] = None
|
||||
else:
|
||||
entries[i] = entry_formatted
|
||||
if not (i % 500):
|
||||
if not (i % 200):
|
||||
print(f"{i} checked.")
|
||||
|
||||
print(f"{len(entries)} checked.")
|
||||
|
||||
with open(path, "w", encoding='UTF-8') as f2:
|
||||
with open(path, "w", encoding='utf-8', newline='\n') as f2:
|
||||
f2.write(per_line_entries(entries))
|
||||
|
||||
print("Writing completed. All done.")
|
||||
|
|
61
tools/merge_out.py
Normal file
|
@ -0,0 +1,61 @@
|
|||
import praw
import json
import time
import re
import os
import traceback
from formatter import format_all, per_line_entries

out_ids = []               # every entry ID seen in the incoming file
out_dupe_ids = []          # IDs that occur more than once (excluded from merge)
out_edited_added_ids = []  # 'edit' markers of entries applied as edits
atlas_ids = []             # IDs already present in the live atlas

with open('temp_atlas.json', 'r', encoding='utf-8') as out_file:
    out_json = json.loads(out_file.read())

with open('../web/atlas.json', 'r', encoding='utf-8') as atlas_file:
    atlas_json = json.loads(atlas_file.read())

for entry in atlas_json:
    atlas_ids.append(entry['id'])

# First pass: detect duplicate IDs inside the incoming file so conflicting
# entries are excluded rather than merged twice.
for entry in out_json:
    if entry['id'] in out_ids:
        print(f"Entry {entry['id']} has duplicates! Please resolve this conflict. This will be excluded from the merge.")
        out_dupe_ids.append(entry['id'])
    out_ids.append(entry['id'])

# Second pass: apply edits to existing Atlas entries, append new ones.
for entry in out_json:
    if entry['id'] in out_dupe_ids:
        continue

    if 'edit' in entry and entry['edit']:
        # Locate the existing atlas entry this edit targets.
        index = next((i for i, item in enumerate(atlas_json) if item["id"] == entry['id']), None)

        assert index is not None, "Edit failed! ID not found on Atlas."

        print(f"Edited {atlas_json[index]['id']} with {entry['edit']}")

    # NOTE(review): an entry with a present-but-falsy 'edit' key reaches
    # this branch without `index` having been set for it — looks
    # unintended; confirm upstream never emits such entries.
    if 'edit' in entry:
        out_edited_added_ids.append(entry['edit'])
        del entry['edit']
        # Merge the contributors list: migrate legacy 'submitted_by',
        # then append only the edit's contributors not already listed.
        if 'submitted_by' in atlas_json[index]:
            atlas_json[index]['contributors'] = [atlas_json[index]['submitted_by']]
        elif not 'contributors' in atlas_json[index]:
            atlas_json[index]['contributors'] = []
        entry['contributors'] = atlas_json[index]['contributors'] + list(set(entry['contributors']) - set(atlas_json[index]['contributors']))
        atlas_json[index] = entry
    else:
        print(f"Added {entry['id']}.")
        atlas_json.append(entry)

print('Writing...')
with open('../web/atlas.json', 'w', encoding='utf-8') as atlas_file:
    atlas_file.write(per_line_entries(atlas_json))

# Append the freshly-read post IDs to the permanent read-ids list.
with open('../data/read-ids.txt', 'a', encoding='utf-8') as read_ids_file:
    with open('read-ids-temp.txt', 'r', encoding='utf-8') as read_ids_temp_file:
        read_ids_file.writelines(read_ids_temp_file.readlines())

print('All done.')
|
105
tools/migrate_atlas_format.py
Normal file
|
@ -0,0 +1,105 @@
|
|||
import os
|
||||
import json
|
||||
import re
|
||||
|
||||
'''
|
||||
Migrator script from old atlas format to remastered atlas format.
|
||||
- center and path: single -> time-specific
|
||||
- website and subreddit: single strings -> links object
|
||||
- submitted_by -> contributors
|
||||
'''
|
||||
#
|
||||
|
||||
# Migrates the old atlas format (single center/path) to the remastered atlas format (time-boxed centers/paths)
|
||||
|
||||
def per_line_entries(entries: list) -> str:
    """Serialize `entries` as a JSON array with one entry per line.

    One-entry-per-line output keeps diffs of the atlas file small.
    """
    if not entries:
        # BUG FIX: the original's trailing-comma slice produced the
        # malformed fragment '\n]' for an empty list.
        return '[]'
    lines = (json.dumps(entry, ensure_ascii=False) for entry in entries)
    # join() avoids the quadratic string `+=` of the original loop.
    return '[\n' + ',\n'.join(lines) + '\n]'
|
||||
|
||||
# The live atlas file is migrated in place.
file_path = os.path.join('..', 'web', 'atlas.json')

# Atlas image indexes used to build the time-range keys below.
END_IMAGE = 166
INIT_CANVAS_RANGE = (1, END_IMAGE)    # original canvas
EXPANSION_1_RANGE = (56, END_IMAGE)   # first canvas expansion (x > 1000)
EXPANSION_2_RANGE = (109, END_IMAGE)  # second canvas expansion (y > 1000)

# Splits "a, b and c"-style subreddit strings into individual tokens.
COMMATIZATION = re.compile(r'(?: *(?:,+ +|,+ |,+)| +)(?:and|&|;)(?: *(?:,+ +|,+ |,+)| +)|, *$| +')
# Extracts the bare subreddit name from /r/... or reddit.com/r/... forms.
FS_REGEX = re.compile(r'(?:(?:(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com)?\/)?[rR]\/([A-Za-z0-9][A-Za-z0-9_]{2,20})(?:\/[^" ]*)*')

with open(file_path, 'r+', encoding='UTF-8') as file:
    entries = json.loads(file.read())

index = 0

for entry in entries:
    # Skeleton of the remastered format; old fields are migrated into it,
    # and any remaining keys on `entry` are merged over it at the end.
    new_entry = {
        "id": "",
        "name": "",
        "description": "",
        "links": {},
        "center": {},
        "path": {},
        "contributors": []
    }

    center = entry['center']
    path = entry['path']

    # A list-valued center means the old single-center format.
    if isinstance(center, list):

        # Use the center to figure out which canvas expansion the entry is in.
        if center[1] > 1000:
            time_range = EXPANSION_2_RANGE
        elif center[0] > 1000:
            time_range = EXPANSION_1_RANGE
        else:
            time_range = INIT_CANVAS_RANGE

        time_key = '%d-%d, T:0-2' % time_range

        # Box the single center/path under the derived time-range key.
        new_entry = {
            **new_entry,
            "center": {
                time_key: center
            },
            "path": {
                time_key: path
            }
        }

        del entry['center']
        del entry['path']

    # website: single string -> links.website list (empty strings dropped).
    if "website" in entry:
        if isinstance(entry["website"], str) and entry["website"]:
            new_entry['links']['website'] = [entry['website']]
        del entry['website']

    # subreddit: single string -> links.subreddit list of bare names.
    if "subreddit" in entry:
        if isinstance(entry["subreddit"], str) and entry["subreddit"]:
            new_entry['links']['subreddit'] = list(map(lambda x: FS_REGEX.sub(r"\1", x), COMMATIZATION.split(entry['subreddit'])))
        del entry['subreddit']

    # submitted_by: single author -> contributors list.
    if "submitted_by" in entry:
        new_entry['contributors'].append(entry['submitted_by'])
        del entry['submitted_by']

    # Remaining original keys win over the skeleton defaults.
    entries[index] = {
        **new_entry,
        **entry
    }

    index += 1

    if not (index % 1000):
        print(f"{index} checked.")

print(f"{len(entries)} checked.")
print("Writing...")

with open(file_path, 'w', encoding='utf-8', newline='\n') as f2:
    f2.write(per_line_entries(entries))

print("All done!")
|
|
@ -1,14 +1,40 @@
|
|||
"""
|
||||
Auth setup
|
||||
1. Head to https://www.reddit.com/prefs/apps
|
||||
2. Click "create another app"
|
||||
3. Give it a name and description
|
||||
4. Select "script"
|
||||
5. Redirect to http://localhost:8080
|
||||
6. Create file "credentials" with the format below
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ [ID] <- Under "personal use script" │
|
||||
│ [Secret] │
|
||||
│ [Username] <- Must be a mod, don't do this if you │
|
||||
│ [Password] <- don't know what you are doing. │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
7. Run Script
|
||||
|
||||
Running Script
|
||||
1. Input the next ID to use
|
||||
2. Manually resolve errors in manual_atlas.json
|
||||
3 a. Use merge_out.py, or...
|
||||
b. a. Copy temp_atlas.json entries into web/_js/atlas.js (mind the edits!)
|
||||
b. Copy read-ids-temp.txt IDs into data/read-ids.txt
|
||||
5. Create a pull request
|
||||
"""
|
||||
|
||||
import praw
|
||||
import json
|
||||
import time
|
||||
import re
|
||||
import os
|
||||
import traceback
|
||||
from formatter import format_all
|
||||
|
||||
outfile = open('temp_atlas.json', 'w', encoding='utf-8')
|
||||
editidsfile = open('read-ids-temp.txt', 'w')
|
||||
failfile = open('manual_atlas.json', 'w', encoding='utf-8')
|
||||
OUT_FILE = open('temp_atlas.json', 'w', encoding='utf-8')
|
||||
READ_IDS_FILE = open('read-ids-temp.txt', 'w')
|
||||
FAIL_FILE = open('manual_atlas.txt', 'w', encoding='utf-8')
|
||||
|
||||
OUT_FILE_LINES = ['[\n', ']\n']
|
||||
|
||||
with open('credentials', 'r') as file:
|
||||
credentials = file.readlines()
|
||||
|
@ -32,10 +58,21 @@
|
|||
|
||||
existing_ids = []
|
||||
|
||||
with open('../data/edit-ids.txt', 'r') as edit_ids_file:
|
||||
with open('../data/read-ids.txt', 'r') as edit_ids_file:
|
||||
for id in [x.strip() for x in edit_ids_file.readlines()]:
|
||||
existing_ids.append(id)
|
||||
|
||||
# with open('../web/atlas.json', 'r') as atlas_file:
|
||||
# atlas_raw: list = json.loads(atlas_file)
|
||||
# atlas = {}
|
||||
# atlas_ids = []
|
||||
# for index in atlas_raw:
|
||||
# entry = atlas_raw[index]
|
||||
# id = entry['id']
|
||||
# del entry['id']
|
||||
# atlas[id] = entry
|
||||
# atlas_ids.append(id)
|
||||
|
||||
def set_flair(submission, flair):
|
||||
if has_write_access and submission.link_flair_text != flair:
|
||||
flair_choices = submission.flair.choices()
|
||||
|
@ -48,31 +85,7 @@ def set_flair(submission, flair):
|
|||
successcount = 0
|
||||
totalcount = 0
|
||||
|
||||
outfile.write("[\n")
|
||||
for submission in reddit.subreddit('placeAtlas2').new(limit=2000):
|
||||
"""
|
||||
Auth setup
|
||||
1. Head to https://www.reddit.com/prefs/apps
|
||||
2. Click "create another app"
|
||||
3. Give it a name and description
|
||||
4. Select "script"
|
||||
5. Redirect to http://localhost:8080
|
||||
6. Create file "credentials" with the format below.
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ [ID] <- Under "personal use script" │
|
||||
│ [Secret] │
|
||||
│ [Username] <- Must be a mod, don't do this if you │
|
||||
│ [Password] <- don't know what you are doing. │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
7. Run Script
|
||||
|
||||
Running Script
|
||||
1. Input the next ID to use
|
||||
2. Manually resolve errors in manual_atlas.json
|
||||
3. Copy temp_atlas.json entries into web/_js/atlas.js
|
||||
4. Pull Request
|
||||
|
||||
"""
|
||||
total_all_flairs += 1
|
||||
|
||||
if (submission.id in existing_ids):
|
||||
|
@ -84,7 +97,7 @@ def set_flair(submission, flair):
|
|||
else:
|
||||
continue
|
||||
|
||||
if (submission.link_flair_text == "New Entry"):
|
||||
if submission.link_flair_text == "New Entry" or submission.link_flair_text == "Edit Entry":
|
||||
|
||||
try:
|
||||
|
||||
|
@ -92,7 +105,7 @@ def set_flair(submission, flair):
|
|||
rawtext = text
|
||||
|
||||
text = text.replace('\u200c', '')
|
||||
text = re.compile(r".*(\{.+\}).*", re.DOTALL).search(text).group(1)
|
||||
text = re.compile(r"(\{.+\})", re.DOTALL).search(text).group(0)
|
||||
# Test if it needs to escape the escape character. Usually happens on fancy mode.
|
||||
try: json.loads(text)
|
||||
except json.JSONDecodeError: text = re.sub(r"\\(.)", r"\1", text)
|
||||
|
@ -101,11 +114,32 @@ def set_flair(submission, flair):
|
|||
|
||||
if submission_json:
|
||||
|
||||
submission_json_dummy = {"id": submission.id, "submitted_by": ""}
|
||||
if submission.link_flair_text == "Edit Entry":
|
||||
|
||||
assert submission_json["id"] != 0, "Edit invalid because ID is tampered, it must not be 0!"
|
||||
|
||||
submission_json_dummy = {"id": submission_json["id"], "edit": submission.id}
|
||||
submission_json["contributors"] = []
|
||||
|
||||
try:
|
||||
submission_json_dummy["submitted_by"] = submission.author.name
|
||||
if not submission.author.name in submission_json:
|
||||
submission_json["contributors"].append(submission.author.name)
|
||||
except AttributeError:
|
||||
submission_json_dummy["submitted_by"] = "unknown"
|
||||
pass
|
||||
|
||||
else:
|
||||
|
||||
assert submission_json["id"] == 0, "Edit invalid because ID is tampered, it must be 0!"
|
||||
|
||||
submission_json_dummy = {"id": submission.id}
|
||||
submission_json["contributors"] = []
|
||||
|
||||
try:
|
||||
if not submission.author.name in submission_json:
|
||||
submission_json["contributors"].append(submission.author.name)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
for key in submission_json:
|
||||
if not key in submission_json_dummy:
|
||||
submission_json_dummy[key] = submission_json[key];
|
||||
|
@ -114,14 +148,15 @@ def set_flair(submission, flair):
|
|||
assert validation_status < 3, \
|
||||
"Submission invalid after validation. This may be caused by not enough points on the path."
|
||||
|
||||
outfile.write(json.dumps(submission_json, ensure_ascii=False) + ",\n")
|
||||
editidsfile.write(submission.id + '\n')
|
||||
OUT_FILE_LINES[len(OUT_FILE_LINES) - 2].replace('\n', ',\n')
|
||||
OUT_FILE_LINES.insert(len(OUT_FILE_LINES) - 1, json.dumps(submission_json, ensure_ascii=False) + '\n')
|
||||
READ_IDS_FILE.write(submission.id + '\n')
|
||||
successcount += 1
|
||||
set_flair(submission, "Processed Entry")
|
||||
|
||||
except Exception as e:
|
||||
failfile.write(
|
||||
"\n\n" + "="*40 + "\n\n" +
|
||||
FAIL_FILE.write(
|
||||
"\n\n" + "="*40 + "\n\nSubmission ID: " +
|
||||
submission.id + "\n\n" +
|
||||
traceback.format_exc() + "\n\n" +
|
||||
"==== RAW ====" + "\n\n" +
|
||||
|
@ -135,10 +170,6 @@ def set_flair(submission, flair):
|
|||
print("Wrote " + submission.id + ", submitted " + str(round(time.time()-submission.created_utc)) + " seconds ago")
|
||||
totalcount += 1
|
||||
|
||||
# Remove last trailing comma
|
||||
outfile.seek(outfile.tell()-3, os.SEEK_SET)
|
||||
outfile.truncate()
|
||||
|
||||
outfile.write("\n]")
|
||||
OUT_FILE.writelines(OUT_FILE_LINES)
|
||||
|
||||
print(f"\n\nTotal all flairs: {total_all_flairs}\nSuccess: {successcount}/{totalcount}\nFail: {failcount}/{totalcount}\nPlease check manual_atlas.txt for failed entries to manually resolve.")
|
||||
|
|
BIN
tools/unused/area-chart.png
Normal file
After Width: | Height: | Size: 42 KiB |
Before Width: | Height: | Size: 34 KiB After Width: | Height: | Size: 34 KiB |
0
tools/combine.sh → tools/unused/combine.sh
Executable file → Normal file
11
web/_headers
Normal file
|
@ -0,0 +1,11 @@
|
|||
/*
|
||||
Access-Control-Allow-Origin: *
|
||||
|
||||
/_img/place/*.png
|
||||
cache-control: public, max-age=604800
|
||||
|
||||
/_img/canvas/*/*.png
|
||||
cache-control: public, max-age=604800
|
||||
|
||||
/_img/canvas/*.png
|
||||
cache-control: public, max-age=604800
|
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 25 KiB |
BIN
web/_img/canvas/place30/000_005.png
Normal file
After Width: | Height: | Size: 177 KiB |
BIN
web/_img/canvas/place30/001_005.png
Normal file
After Width: | Height: | Size: 221 KiB |
BIN
web/_img/canvas/place30/002_005.png
Normal file
After Width: | Height: | Size: 227 KiB |
BIN
web/_img/canvas/place30/003_005.png
Normal file
After Width: | Height: | Size: 202 KiB |
BIN
web/_img/canvas/place30/004_005.png
Normal file
After Width: | Height: | Size: 153 KiB |
BIN
web/_img/canvas/place30/005.png
Normal file
After Width: | Height: | Size: 274 KiB |
BIN
web/_img/canvas/place30/006_005.png
Normal file
After Width: | Height: | Size: 138 KiB |
BIN
web/_img/canvas/place30/007_005.png
Normal file
After Width: | Height: | Size: 172 KiB |
BIN
web/_img/canvas/place30/008_005.png
Normal file
After Width: | Height: | Size: 183 KiB |
BIN
web/_img/canvas/place30/009_005.png
Normal file
After Width: | Height: | Size: 189 KiB |
BIN
web/_img/canvas/place30/010_005.png
Normal file
After Width: | Height: | Size: 191 KiB |
BIN
web/_img/canvas/place30/011_016.png
Normal file
After Width: | Height: | Size: 158 KiB |
BIN
web/_img/canvas/place30/012_016.png
Normal file
After Width: | Height: | Size: 146 KiB |
BIN
web/_img/canvas/place30/013_016.png
Normal file
After Width: | Height: | Size: 133 KiB |
BIN
web/_img/canvas/place30/014_016.png
Normal file
After Width: | Height: | Size: 116 KiB |
BIN
web/_img/canvas/place30/015_016.png
Normal file
After Width: | Height: | Size: 95 KiB |
BIN
web/_img/canvas/place30/016.png
Normal file
After Width: | Height: | Size: 206 KiB |
BIN
web/_img/canvas/place30/017_016.png
Normal file
After Width: | Height: | Size: 91 KiB |
BIN
web/_img/canvas/place30/018_016.png
Normal file
After Width: | Height: | Size: 109 KiB |
BIN
web/_img/canvas/place30/019_016.png
Normal file
After Width: | Height: | Size: 123 KiB |
BIN
web/_img/canvas/place30/020_016.png
Normal file
After Width: | Height: | Size: 131 KiB |
BIN
web/_img/canvas/place30/021_016.png
Normal file
After Width: | Height: | Size: 136 KiB |
BIN
web/_img/canvas/place30/022_027.png
Normal file
After Width: | Height: | Size: 141 KiB |
BIN
web/_img/canvas/place30/023_027.png
Normal file
After Width: | Height: | Size: 130 KiB |
BIN
web/_img/canvas/place30/024_027.png
Normal file
After Width: | Height: | Size: 124 KiB |
BIN
web/_img/canvas/place30/025_027.png
Normal file
After Width: | Height: | Size: 113 KiB |
BIN
web/_img/canvas/place30/026_027.png
Normal file
After Width: | Height: | Size: 93 KiB |
BIN
web/_img/canvas/place30/027.png
Normal file
After Width: | Height: | Size: 254 KiB |
BIN
web/_img/canvas/place30/028_027.png
Normal file
After Width: | Height: | Size: 97 KiB |
BIN
web/_img/canvas/place30/029_027.png
Normal file
After Width: | Height: | Size: 122 KiB |
BIN
web/_img/canvas/place30/030_027.png
Normal file
After Width: | Height: | Size: 136 KiB |
BIN
web/_img/canvas/place30/031_027.png
Normal file
After Width: | Height: | Size: 148 KiB |
BIN
web/_img/canvas/place30/032_027.png
Normal file
After Width: | Height: | Size: 152 KiB |
BIN
web/_img/canvas/place30/033_038.png
Normal file
After Width: | Height: | Size: 149 KiB |
BIN
web/_img/canvas/place30/034_038.png
Normal file
After Width: | Height: | Size: 141 KiB |
BIN
web/_img/canvas/place30/035_038.png
Normal file
After Width: | Height: | Size: 126 KiB |
BIN
web/_img/canvas/place30/036_038.png
Normal file
After Width: | Height: | Size: 108 KiB |
BIN
web/_img/canvas/place30/037_038.png
Normal file
After Width: | Height: | Size: 83 KiB |
BIN
web/_img/canvas/place30/038.png
Normal file
After Width: | Height: | Size: 244 KiB |
BIN
web/_img/canvas/place30/039_038.png
Normal file
After Width: | Height: | Size: 79 KiB |
BIN
web/_img/canvas/place30/040_038.png
Normal file
After Width: | Height: | Size: 100 KiB |
BIN
web/_img/canvas/place30/041_038.png
Normal file
After Width: | Height: | Size: 110 KiB |
BIN
web/_img/canvas/place30/042_038.png
Normal file
After Width: | Height: | Size: 120 KiB |
BIN
web/_img/canvas/place30/043_038.png
Normal file
After Width: | Height: | Size: 128 KiB |
BIN
web/_img/canvas/place30/044_049.png
Normal file
After Width: | Height: | Size: 129 KiB |
BIN
web/_img/canvas/place30/045_049.png
Normal file
After Width: | Height: | Size: 124 KiB |
BIN
web/_img/canvas/place30/046_049.png
Normal file
After Width: | Height: | Size: 113 KiB |
BIN
web/_img/canvas/place30/047_049.png
Normal file
After Width: | Height: | Size: 102 KiB |
BIN
web/_img/canvas/place30/048_049.png
Normal file
After Width: | Height: | Size: 81 KiB |
BIN
web/_img/canvas/place30/049.png
Normal file
After Width: | Height: | Size: 242 KiB |
BIN
web/_img/canvas/place30/050_049.png
Normal file
After Width: | Height: | Size: 84 KiB |
BIN
web/_img/canvas/place30/051_049.png
Normal file
After Width: | Height: | Size: 97 KiB |
BIN
web/_img/canvas/place30/052_049.png
Normal file
After Width: | Height: | Size: 110 KiB |
BIN
web/_img/canvas/place30/053_049.png
Normal file
After Width: | Height: | Size: 120 KiB |
BIN
web/_img/canvas/place30/054_049.png
Normal file
After Width: | Height: | Size: 156 KiB |
BIN
web/_img/canvas/place30/055_060.png
Normal file
After Width: | Height: | Size: 345 KiB |
BIN
web/_img/canvas/place30/056_060.png
Normal file
After Width: | Height: | Size: 368 KiB |
BIN
web/_img/canvas/place30/057_060.png
Normal file
After Width: | Height: | Size: 352 KiB |
BIN
web/_img/canvas/place30/058_060.png
Normal file
After Width: | Height: | Size: 307 KiB |
BIN
web/_img/canvas/place30/059_060.png
Normal file
After Width: | Height: | Size: 229 KiB |
BIN
web/_img/canvas/place30/060.png
Normal file
After Width: | Height: | Size: 551 KiB |
BIN
web/_img/canvas/place30/061_060.png
Normal file
After Width: | Height: | Size: 221 KiB |
BIN
web/_img/canvas/place30/062_060.png
Normal file
After Width: | Height: | Size: 285 KiB |
BIN
web/_img/canvas/place30/063_060.png
Normal file
After Width: | Height: | Size: 321 KiB |
BIN
web/_img/canvas/place30/064_060.png
Normal file
After Width: | Height: | Size: 342 KiB |
BIN
web/_img/canvas/place30/065_060.png
Normal file
After Width: | Height: | Size: 367 KiB |
BIN
web/_img/canvas/place30/066_071.png
Normal file
After Width: | Height: | Size: 307 KiB |
BIN
web/_img/canvas/place30/067_071.png
Normal file
After Width: | Height: | Size: 277 KiB |
BIN
web/_img/canvas/place30/068_071.png
Normal file
After Width: | Height: | Size: 244 KiB |
BIN
web/_img/canvas/place30/069_071.png
Normal file
After Width: | Height: | Size: 200 KiB |
BIN
web/_img/canvas/place30/070_071.png
Normal file
After Width: | Height: | Size: 140 KiB |
BIN
web/_img/canvas/place30/071.png
Normal file
After Width: | Height: | Size: 566 KiB |
BIN
web/_img/canvas/place30/072_071.png
Normal file
After Width: | Height: | Size: 144 KiB |
BIN
web/_img/canvas/place30/073_071.png
Normal file
After Width: | Height: | Size: 204 KiB |
BIN
web/_img/canvas/place30/074_071.png
Normal file
After Width: | Height: | Size: 243 KiB |
BIN
web/_img/canvas/place30/075_071.png
Normal file
After Width: | Height: | Size: 285 KiB |
BIN
web/_img/canvas/place30/076_071.png
Normal file
After Width: | Height: | Size: 295 KiB |
BIN
web/_img/canvas/place30/077_082.png
Normal file
After Width: | Height: | Size: 289 KiB |
BIN
web/_img/canvas/place30/078_082.png
Normal file
After Width: | Height: | Size: 259 KiB |