Merge branch 'remaster' into fix-line-end

This commit is contained in:
Fabian Wunsch 2022-04-17 09:12:12 +02:00 committed by GitHub
commit a80f070173
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
243 changed files with 14026 additions and 4128 deletions

1
.gitignore vendored
View file

@ -14,3 +14,4 @@ combined.js
.vscode/
web/atlas-before-ids-migration.json
*.pyc
tools/read-ids-temp.txt

View file

@ -19,7 +19,7 @@ To contribute to the map, we require a certain format for artwork region and lab
### GitHub Submission
1. Create a fork of our repo.
2. Enter your data into the `web/_js/atlas.js` file, with the correct format and ID number.
2. Enter your data into the `web/atlas.json` file, with the correct format and ID number.
3. Create a Pull Request.
-->
@ -27,9 +27,9 @@ To contribute to the map, we require a certain format for artwork region and lab
## Map Edits
1. Create a fork of our repo.
2. Enter your data into the `web/_js/atlas.json` file, with the correct format and ID number.
2. Enter your data into the `web/atlas.json` file, with the correct format and ID number.
3. Create a Pull Request against the `/cleanup` branch.
## Cleaning Contributions
If you spot a duplicate, please PR against `/cleanup`. To help find duplicates, append `?mode=overlap` to the url: [`https://place-atlas.stefanocoding.me?mode=overlap`](https://place-atlas.stefanocoding.me?mode=overlap).
If you spot a duplicate, please PR against `/cleanup`. To help find duplicates, append `?mode=overlap` to the url: [`https://place-atlas.stefanocoding.me?mode=overlap`](https://place-atlas.stefanocoding.me?mode=overlap).

View file

@ -1,2 +0,0 @@
/*
Access-Control-Allow-Origin: *

8853
data/read-ids.txt Normal file

File diff suppressed because it is too large Load diff

View file

@ -1,4 +0,0 @@
[[headers]]
for = "/*"
[headers.values]
Access-Control-Allow-Origin = "*"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 118 KiB

After

Width:  |  Height:  |  Size: 42 KiB

192
tools/calculate_center.py Normal file
View file

@ -0,0 +1,192 @@
"""
From https://github.com/Twista/python-polylabel/,
which is in turn implemented from https://github.com/mapbox/polylabel
"""
from math import sqrt, log10
import time
from typing import Tuple, List
# Python3
from queue import PriorityQueue
from math import inf
Point = Tuple[float, float]
Polygon = List[Point]
SQRT2 = sqrt(2)
def _point_to_polygon_distance(x: float, y: float, polygon: Polygon) -> float:
    """Signed distance from (x, y) to the polygon outline.

    Positive when the point lies inside the polygon, negative outside.
    Containment uses the even-odd ray-casting rule; the magnitude is the
    distance to the nearest edge.
    """
    is_inside = False
    best_sq = inf
    prev_pt = polygon[-1]
    for cur_pt in polygon:
        # Does a horizontal ray from (x, y) cross the edge prev_pt-cur_pt?
        crosses = (cur_pt[1] > y) != (prev_pt[1] > y)
        if crosses and x < (prev_pt[0] - cur_pt[0]) * (y - cur_pt[1]) / (prev_pt[1] - cur_pt[1]) + cur_pt[0]:
            is_inside = not is_inside
        edge_sq = _get_segment_distance_squared(x, y, cur_pt, prev_pt)
        if edge_sq < best_sq:
            best_sq = edge_sq
        prev_pt = cur_pt
    distance = sqrt(best_sq)
    return distance if is_inside else -distance
def _get_segment_distance_squared(px: float, py: float, point_a: Point, point_b: Point) -> float:
x: float = point_a[0]
y: float = point_a[1]
dx: float = point_b[0] - x
dy: float = point_b[1] - y
if dx != 0 or dy != 0:
t = ((px - x) * dx + (py - y) * dy) / (dx * dx + dy * dy)
if t > 1:
x = point_b[0]
y = point_b[1]
elif t > 0:
x += dx * t
y += dy * t
dx = px - x
dy = py - y
return dx * dx + dy * dy
class Cell(object):
    """One square probe cell of polylabel's branch-and-bound search.

    Attributes:
        x, y: centre of the cell.
        h: half of the cell's side length.
        min_dist: signed distance from the centre to the polygon outline.
        center_dist: squared distance from the centre to the polygon centroid.
        max: upper bound on the outline distance achievable anywhere inside
            the cell (centre distance plus the half-diagonal h * sqrt(2)).
        weight: explicit priority-queue key; smaller sorts first.
    """

    def __init__(self, x: float, y: float, h: float, polygon: Polygon, centroid: Point):
        self.h: float = h
        self.y: float = y
        self.x: float = x
        min_dist = _point_to_polygon_distance(x, y, polygon)
        self.min_dist: float = min_dist
        self.center_dist: float = (centroid[0] - x) ** 2 + (centroid[1] - y) ** 2
        # Half-diagonal bound: no point inside the cell can be farther
        # from the outline than this.
        self.max = self.min_dist + self.h * SQRT2
        self.weight = -self.center_dist - self.max

    # Rich comparisons delegate to `max`.  Bug fix: the "or equal" hooks
    # were spelled __lte__/__gte__, names Python never invokes, so the
    # <= and >= operators raised TypeError between Cells.  The correct
    # dunder names are __le__ and __ge__.
    def __lt__(self, other):
        return self.max < other.max

    def __le__(self, other):
        return self.max <= other.max

    def __gt__(self, other):
        return self.max > other.max

    def __ge__(self, other):
        return self.max >= other.max

    def __eq__(self, other):
        return self.max == other.max
def _get_centroid(polygon: Polygon) -> Point:
area: float = 0
x: float = 0
y: float = 0
previous: Point = polygon[-1]
for current in polygon:
f: float = current[0] * previous[1] - previous[0] * current[1]
x += (current[0] + previous[0]) * f
y += (current[1] + previous[1]) * f
area += f * 3
previous =current
if area == 0:
return (polygon[0][0], polygon[0][1])
return (x / area, y / area)
def _get_centroid_cell(polygon: Polygon, centroid: Point) -> Cell:
    """Build a zero-size probe cell located at the polygon's centroid."""
    cx, cy = centroid
    return Cell(cx, cy, 0, polygon, centroid)
def polylabel(polygon: Polygon, precision: float = 0.5, debug: bool = False):
    """Return [x, y], the polygon's pole of inaccessibility.

    Branch-and-bound search over square cells (Mapbox's polylabel
    algorithm): the most promising cells are subdivided until no cell can
    beat the best-known interior point by more than `precision`.  Between
    nearly equally deep points, the one closer to the centroid wins
    (controlled by `threshold` below).
    """
    # Compute the bounding box of the polygon.
    first_item: Point = polygon[0]
    min_x: float = first_item[0]
    min_y: float = first_item[1]
    max_x: float = first_item[0]
    max_y: float = first_item[1]
    for p in polygon:
        if p[0] < min_x:
            min_x = p[0]
        if p[1] < min_y:
            min_y = p[1]
        if p[0] > max_x:
            max_x = p[0]
        if p[1] > max_y:
            max_y = p[1]

    width: float = max_x - min_x
    height: float = max_y - min_y
    cell_size: float = min(width, height)
    h: float = cell_size / 2.0

    cell_queue: PriorityQueue[Tuple[float, int, Cell]] = PriorityQueue()

    if cell_size == 0:
        # Degenerate polygon (a point or an axis-aligned line): return the
        # midpoint of the bounding box.  Bug fix: the previous code
        # returned the half-extents [(max_x - min_x) / 2, (max_y - min_y) / 2]
        # without adding the min_x/min_y offset, i.e. a point near the
        # origin rather than on the polygon.
        return [min_x + width / 2, min_y + height / 2]

    centroid: Point = _get_centroid(polygon)

    # Cover the polygon's bounding box with the initial grid of cells.
    # time.time() acts only as a tie-breaker between equal weights.
    x: float = min_x
    while x < max_x:
        y: float = min_y
        while y < max_y:
            c: Cell = Cell(x + h, y + h, h, polygon, centroid)
            y += cell_size
            cell_queue.put((c.weight, time.time(), c))
        x += cell_size

    # Seed the best cell with the centroid and the bbox centre probes.
    best_cell: Cell = _get_centroid_cell(polygon, centroid)

    bbox_cell: Cell = Cell(min_x + width / 2, min_y + height / 2, 0, polygon, centroid)
    if bbox_cell.min_dist > best_cell.min_dist:
        best_cell = bbox_cell

    # How much closer a point is allowed to be to the border while still
    # winning on a shorter distance to the centroid.
    threshold: float = log10(cell_size) / 3.0

    num_of_probes = cell_queue.qsize()
    while not cell_queue.empty():
        _, __, cell = cell_queue.get()

        # Update if either the cell is further from the edge, or if it is
        # sufficiently similarly far from the edge but closer to the centroid.
        if (cell.min_dist > best_cell.min_dist
            or (
                cell.center_dist < best_cell.center_dist
                and cell.min_dist > best_cell.min_dist - threshold
            )
        ):
            best_cell = cell
            if debug:
                print(f'found best {round(cell.min_dist, 4)};{round(sqrt(cell.center_dist), 4)} after {num_of_probes} probes')

        # The cell's upper bound cannot beat the current best by more than
        # `precision`: prune instead of splitting.
        if cell.max - best_cell.min_dist <= precision:
            continue

        # Split the cell into four children and keep searching.
        h = cell.h / 2
        c = Cell(cell.x - h, cell.y - h, h, polygon, centroid)
        cell_queue.put((c.weight, time.time(), c))
        c = Cell(cell.x + h, cell.y - h, h, polygon, centroid)
        cell_queue.put((c.weight, time.time(), c))
        c = Cell(cell.x - h, cell.y + h, h, polygon, centroid)
        cell_queue.put((c.weight, time.time(), c))
        c = Cell(cell.x + h, cell.y + h, h, polygon, centroid)
        cell_queue.put((c.weight, time.time(), c))
        num_of_probes += 4

    if debug:
        print(f'num probes: {num_of_probes}')
        print(f'best distance: {best_cell.min_dist}')

    return [best_cell.x, best_cell.y]

View file

@ -2,6 +2,9 @@
import re
import json
import math
from calculate_center import polylabel
"""
Examples:
@ -25,6 +28,7 @@
"pattern1user": r'\/*(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
"pattern2user": r'^\/*(?:u|user)(?!\/)([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/$)?',
"pattern3user": r'(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com\/(?:u|user)\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*',
"pattern1new": r'(?:(?:(?:(?:https?:\/\/)?(?:(?:www|old|new|np)\.)?)?reddit\.com)?\/)?[rR]\/([A-Za-z0-9][A-Za-z0-9_]{1,20})(?:\/[^" ]*)*'
# "pattern4": r'(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*',
# "pattern5": r'\[(?:https?:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\]\((?:https:\/\/)?(?!^www\.)(.+)\.reddit\.com(?:\/[^"]*)*\)"',
}
@ -52,27 +56,43 @@ def format_subreddit(entry: dict):
"""
Fix formatting of the value on "subreddit".
"""
if not "subreddit" in entry or not entry['subreddit']:
return entry
subredditLink = entry["subreddit"]
subredditLink = re.sub(FS_REGEX["commatization"], ', ', subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2user"], USER_TEMPLATE, subredditLink)
if "subreddit" in entry and entry["subreddit"]:
subredditLink = entry["subreddit"]
subredditLink = re.sub(FS_REGEX["commatization"], ', ', subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2"], SUBREDDIT_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern3user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1user"], USER_TEMPLATE, subredditLink)
subredditLink = re.sub(FS_REGEX["pattern2user"], USER_TEMPLATE, subredditLink)
entry["subreddit"] = subredditLink
if "links" in entry and "subreddit" in entry["links"]:
for i in range(len(entry["links"]["subreddit"])):
subredditLink = entry["links"]["subreddit"][i]
subredditLink = re.sub(FS_REGEX["pattern3"], r"\1", subredditLink)
subredditLink = re.sub(FS_REGEX["pattern1new"], r"\1", subredditLink)
entry["links"]["subreddit"][i] = subredditLink
if not subredditLink:
return entry
entry["subreddit"] = subredditLink
return entry
def collapse_links(entry: dict):
"""
Collapses Markdown links.
"""
if "website" in entry and entry['website']:
website = entry["website"];
website = entry["website"]
if re.search(CL_REGEX, website):
match = re.search(CL_REGEX, website)
if match.group(1) == match.group(2):
@ -80,8 +100,23 @@ def collapse_links(entry: dict):
entry["website"] = website
elif "links" in entry and "website" in entry["links"]:
for i in range(len(entry["links"]["website"])):
website = entry["links"]["website"][i]
if re.search(CL_REGEX, website):
match = re.search(CL_REGEX, website)
if match.group(1) == match.group(2):
website = match.group(2)
entry["links"]["website"][i] = website
if "subreddit" in entry and entry['subreddit']:
subreddit = entry["subreddit"];
subreddit = entry["subreddit"]
if re.search(CL_REGEX, subreddit):
match = re.search(CL_REGEX, subreddit)
if match.group(1) == match.group(2):
@ -89,12 +124,27 @@ def collapse_links(entry: dict):
entry["subreddit"] = subreddit
elif "links" in entry and "subreddit" in entry["links"]:
for i in range(len(entry["links"]["subreddit"])):
subreddit = entry["links"]["subreddit"][i]
if re.search(CL_REGEX, subreddit):
match = re.search(CL_REGEX, subreddit)
if match.group(1) == match.group(2):
subreddit = match.group(2)
entry["links"]["subreddit"][i] = subreddit
return entry
def remove_extras(entry: dict):
"""
Removing unnecessary extra characters and converts select characters.
"""
if "subreddit" in entry and entry["subreddit"]:
# if not entry["subreddit"].startswith('/r/'):
# entry["subreddit"] = re.sub(r'^(.*)(?=\/r\/)', r'', entry["subreddit"])
@ -120,10 +170,39 @@ def remove_extras(entry: dict):
return entry
def remove_duplicate_points(entry: dict):
    """
    Removes points from paths that occur twice after each other
    """
    if not "path" in entry:
        return entry
    if isinstance(entry['path'], list):
        # Single-period entry: the path is a flat list of points.
        path: list = entry['path']
        previous: list = path[0]
        # Walk backwards so pop(i) never shifts an index we have yet to visit.
        # NOTE(review): `previous` starts at path[0], so on the first iteration
        # the last point is dropped when it equals the first — presumably
        # intentional for closed paths; confirm against the data format.
        for i in range(len(path)-1, -1, -1):
            current: list = path[i]
            if current == previous:
                path.pop(i)
            previous = current
    else:
        # Multi-period entry: path is a dict mapping period keys to point
        # lists; dedupe each period's list in place the same way.
        for key in entry['path']:
            path: list = entry['path'][key]
            previous: list = path[0]
            for i in range(len(path)-1, -1, -1):
                current: list = path[i]
                if current == previous:
                    path.pop(i)
                previous = current
    return entry
def fix_r_caps(entry: dict):
"""
Fixes capitalization of /r/. (/R/place -> /r/place)
"""
if not "description" in entry or not entry['description']:
return entry
@ -136,11 +215,14 @@ def fix_no_protocol_urls(entry: dict):
"""
Fixes URLs with no protocol by adding "https://" protocol.
"""
if not "website" in entry or not entry['website']:
return entry
if not entry["website"].startswith("http"):
entry["website"] = "https://" + entry["website"]
if "links" in entry and "website" in entry['links']:
for i in range(len(entry["links"]["website"])):
if entry["links"]["website"][i] and not entry["links"]["website"][i].startswith("http"):
entry["links"]["website"][i] = "https://" + entry["website"]
elif "website" in entry and entry['website']:
if not entry["website"].startswith("http"):
entry["website"] = "https://" + entry["website"]
return entry
@ -148,23 +230,43 @@ def convert_website_to_subreddit(entry: dict):
"""
Converts the subreddit link on "website" to "subreddit" if possible.
"""
if not "website" in entry or not entry['website']:
return entry
if re.match(CWTS_REGEX["url"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["url"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
elif re.match(CWTS_REGEX["subreddit"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["subreddit"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
if "links" in entry and "website" in entry["links"]:
for i in range(len(entry["links"]["website"])):
if re.match(CWTS_REGEX["url"], entry["links"]["website"][i]):
new_subreddit = re.sub(CWTS_REGEX["url"], r"\1", entry["links"]["website"][i])
if new_subreddit in entry["links"]["subreddit"]:
entry["links"]["website"][i] = ""
elif not "subreddit" in entry["links"] or len(entry["subreddit"]) == 0:
if not "subreddit" in entry["links"]:
entry["links"]["subreddit"] = []
entry["links"]["subreddit"].append(new_subreddit)
entry["links"]["website"][i] = ""
elif re.match(CWTS_REGEX["subreddit"], entry["links"]["website"][i]):
new_subreddit = re.sub(CWTS_REGEX["subreddit"], r"\1", entry["links"]["website"][i])
if new_subreddit in entry["links"]["subreddit"]:
entry["links"]["website"][i] = ""
elif not "subreddit" in entry["links"] or len(entry["subreddit"]) == 0:
if not "subreddit" in entry["links"]:
entry["links"]["subreddit"] = []
entry["links"]["subreddit"].append(new_subreddit)
entry["links"]["website"][i] = ""
elif "website" in entry and entry['website']:
if re.match(CWTS_REGEX["url"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["url"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
elif re.match(CWTS_REGEX["subreddit"], entry["website"]):
new_subreddit = re.sub(CWTS_REGEX["subreddit"], SUBREDDIT_TEMPLATE, entry["website"])
if (new_subreddit.lower() == entry["subreddit"].lower()):
entry["website"] = ""
elif not "subreddit" in entry or entry['subreddit'] == "":
entry["subreddit"] = new_subreddit
entry["website"] = ""
return entry
@ -172,66 +274,85 @@ def convert_subreddit_to_website(entry: dict):
"""
Converts the links on "subreddit" to a "website" if needed. This also supports Reddit users (/u/reddit).
"""
if not "subreddit" in entry or not entry['subreddit']:
return entry
if re.match(CSTW_REGEX["website"], entry["subreddit"]):
if (entry["website"].lower() == entry["subreddit"].lower()):
entry["subreddit"] = ""
elif not "website" in entry or entry['website'] == "":
entry["website"] = entry["subreddit"]
entry["subreddit"] = ""
elif re.match(CSTW_REGEX["user"], entry["subreddit"]):
if not "website" in entry or entry['website'] == "":
username = re.match(CSTW_REGEX["user"], entry["subreddit"]).group(1)
entry["website"] = "https://www.reddit.com/user/" + username
entry["subreddit"] = ""
if "links" in entry and "subreddit" in entry["links"]:
for i in range(len(entry["links"]["subreddit"])):
if re.match(CSTW_REGEX["website"], entry["links"]["subreddit"][i]):
if "website" in entry["links"] and entry["links"]["subreddit"][i] in entry["links"]["website"]:
entry["links"]["subreddit"][i] = ""
elif not "website" in entry["links"] or len(entry["website"]) == 0:
if not "website" in entry["links"]:
entry["links"]["website"] = []
entry["website"].append(entry["links"]["subreddit"][i])
entry["links"]["subreddit"][i] = ""
elif re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]):
if not "website" in entry["links"] or len(entry["website"]) == 0:
username = re.match(CSTW_REGEX["user"], entry["links"]["subreddit"][i]).group(1)
if not "website" in entry["links"]:
entry["links"]["website"] = []
entry["website"].append("https://www.reddit.com/user/" + username)
entry["links"]["subreddit"][i] = ""
elif "subreddit" in entry and entry['subreddit']:
if re.match(CSTW_REGEX["website"], entry["subreddit"]):
if (entry["website"].lower() == entry["subreddit"].lower()):
entry["subreddit"] = ""
elif not "website" in entry or entry['website'] == "":
entry["website"] = entry["subreddit"]
entry["subreddit"] = ""
elif re.match(CSTW_REGEX["user"], entry["subreddit"]):
if not "website" in entry or entry['website'] == "":
username = re.match(CSTW_REGEX["user"], entry["subreddit"]).group(1)
entry["website"] = "https://www.reddit.com/user/" + username
entry["subreddit"] = ""
return entry
def calculate_center(path: list):
"""
Caluclates the center of a polygon
adapted from /web/_js/draw.js:calucalteCenter()
"""
area = 0
x = 0
y = 0
for i in range(len(path)):
point1 = path[i]
point2 = path[i-1 if i != 0 else len(path)-1]
f = point1[0] * point2[1] - point2[0] * point1[1]
area += f
x += (point1[0] + point2[0]) * f
y += (point1[1] + point2[1]) * f
area *= 3
if area != 0:
return [x // area + 0.5, y // area + 0.5]
else:
# get the center of a straight line
max_x = max(i[0] for i in path)
min_x = min(i[0] for i in path)
max_y = max(i[1] for i in path)
min_y = min(i[1] for i in path)
return [(max_x + min_x) // 2 + 0.5, (max_y + min_y) // 2 + 0.5]
result = polylabel(path)
return [math.floor(result[0]) + 0.5, math.floor(result[1]) + 0.5]
def update_center(entry: dict):
"""
checks if the center of a entry is up to date, and updates it if it's either missing or outdated
checks if the center of a entry is up to date, and updates it if it's either missing or outdated.
"""
if 'path' not in entry:
return entry
path = entry['path']
if len(path) > 1:
calculated_center = calculate_center(path)
if 'center' not in entry or entry['center'] != calculated_center:
entry['center'] = calculated_center
if isinstance(entry['path'], list):
path = entry['path']
if len(path) > 1:
calculated_center = calculate_center(path)
if 'center' not in entry or entry['center'] != calculated_center:
entry['center'] = calculated_center
else:
for key in entry['path']:
path = entry['path'][key]
if len(path) > 1:
calculated_center = calculate_center(path)
if 'center' not in entry or key not in entry['center'] or entry['center'][key] != calculated_center:
entry['center'][key] = calculated_center
return entry
def remove_empty_and_similar(entry: dict):
    """
    Removes empty items on lists, usually from the past formattings.
    """
    if "links" in entry:
        # Bug fix: the key was misspelled "li/nks", which raised KeyError
        # for every entry that actually had a "links" dict.
        for key in entry["links"]:
            small = [x.lower() for x in entry["links"][key]]
            # Keep only non-empty items.  NOTE(review): `x.lower() in small`
            # is always true as written (small is built from the same list);
            # a similarity check was presumably intended — kept as-is for
            # behaviour parity.
            entry["links"][key] = [x for x in entry["links"][key] if x and x.lower() in small]
    return entry
def validate(entry: dict):
"""
Validates the entry. Catch errors and tell warnings related to the entry.
@ -242,17 +363,34 @@ def validate(entry: dict):
2: Warnings that may effect user experience when interacting with the entry
3: Errors that make the entry inaccessible or broken.
"""
return_status = 0
if (not "id" in entry or (not entry['id'] and not entry['id'] == 0)):
print(f"Wait, no id here! How did this happened? {entry}")
return_status = 3
entry['id'] = '[MISSING_ID]'
if not ("path" in entry and isinstance(entry["path"], list) and len(entry["path"]) > 0):
print(f"Entry {entry['id']} has no points!")
return_status = 3
elif len(entry["path"]) < 3:
print(f"Entry {entry['id']} only has {len(entry['path'])} point(s)!")
if "path" in entry:
if isinstance(entry['path'], list):
if len(entry["path"]) == 0:
print(f"Entry {entry['id']} has no points!")
return_status = 3
elif len(entry["path"]) < 3:
print(f"Entry {entry['id']} only has {len(entry['path'])} point(s)!")
return_status = 3
else:
for key in entry['path']:
path = entry['path'][key]
if len(path) == 0:
print(f"Period {key} of entry {entry['id']} has no points!")
return_status = 3
elif len(path) < 3:
print(f"Period {key} of entry {entry['id']} only has {len(entry['path'])} point(s)!")
return_status = 3
else:
print(f"Entry {entry['id']} has no path at all!")
return_status = 3
for key in entry:
if key in VALIDATE_REGEX and not re.match(VALIDATE_REGEX[key], entry[key]):
if return_status < 2: return_status = 2
@ -298,8 +436,12 @@ def print_(*args, **kwargs):
entry = fix_no_protocol_urls(entry)
print_("Removing extras...")
entry = remove_extras(entry)
print_("Updating center")
print_("Removing duplicate points...")
entry = remove_duplicate_points(entry)
print_("Updating center...")
entry = update_center(entry)
print_("Remove empty items...")
entry = remove_empty_and_similar(entry)
print_("Validating...")
status_code = validate(entry)
print_("Completed!")

52
tools/merge_out.py Normal file
View file

@ -0,0 +1,52 @@
import praw
import json
import time
import re
import os
import traceback
from formatter import format_all, per_line_entries

# Merge the scraped entries in temp_atlas.json into web/atlas.json:
# in-file duplicates are excluded, edits are merged into the existing
# atlas entry (combining contributor lists), new entries are appended.

out_ids = []                # every id seen in the scraped output
out_dupe_ids = []           # ids appearing more than once in the output
out_edited_added_ids = []   # 'edit' markers appended to data/edit-ids.txt
atlas_ids = []              # ids already present in the atlas

with open('temp_atlas.json', 'r', encoding='utf-8') as out_file:
    out_json = json.loads(out_file.read())

with open('../web/atlas.json', 'r', encoding='utf-8') as atlas_file:
    atlas_json = json.loads(atlas_file.read())

for entry in atlas_json:
    atlas_ids.append(entry['id'])

# First pass: flag ids duplicated inside the scraped output itself.
for entry in out_json:
    if entry['id'] in out_ids:
        print(f"Entry {entry['id']} has duplicates! Please resolve this conflict. This will be excluded from the merge.")
        out_dupe_ids.append(entry['id'])
    out_ids.append(entry['id'])

for entry in out_json:
    if entry['id'] in out_dupe_ids:
        continue
    # Bug fix: the membership test must be against the ids already in the
    # atlas (atlas_ids).  Testing out_ids was always true, because every
    # output id was appended to out_ids in the pass above, which forced
    # every entry — including brand-new ones — down the merge path.
    if ('edit' in entry and entry['edit']) or entry['id'] in atlas_ids:
        index = next((i for i, item in enumerate(atlas_json) if item["id"] == entry['id']), None)
        if 'edit' in entry:
            # NOTE(review): these values are '\n'.join-ed below — confirm
            # they are strings (submission ids), not booleans.
            out_edited_added_ids.append(entry['edit'])
            del entry['edit']
        if index is None:
            # Marked as an edit but the id is not in the atlas: treat as new
            # instead of crashing on atlas_json[None].
            atlas_json.append(entry)
            continue
        if 'submitted_by' in atlas_json[index]:
            # Bug fix: atlas entries are dicts, so assign via the
            # 'contributors' key — attribute access (.contributors)
            # raised AttributeError.
            atlas_json[index]['contributors'] = [atlas_json[index]['submitted_by']]
        # Merge contributor lists, keeping atlas order and de-duplicating.
        entry['contributors'] = atlas_json[index]['contributors'] + list(set(entry['contributors']) - set(atlas_json[index]['contributors']))
        atlas_json[index] = entry
    else:
        atlas_json.append(entry)

print('Writing...')

with open('../web/atlas.json', 'w', encoding='utf-8') as atlas_file:
    atlas_file.write(per_line_entries(atlas_json))

with open('../data/edit-ids.txt', 'a', encoding='utf-8') as edit_ids_file:
    edit_ids_file.write('\n'.join(out_edited_added_ids) + '\n')

print('All done.')

View file

@ -7,6 +7,7 @@
from formatter import format_all
outfile = open('temp_atlas.json', 'w', encoding='utf-8')
editidsfile = open('read-ids-temp.txt', 'w')
failfile = open('manual_atlas.json', 'w', encoding='utf-8')
with open('credentials', 'r') as file:
@ -29,13 +30,16 @@
print("Warning: No write access. Post flairs will not be updated.")
time.sleep(5)
jsonfile = open("../web/atlas.json", "r", encoding='utf-8')
existing = json.load(jsonfile)
existing_ids = []
for item in existing:
existing_ids.append(item['id'])
with open('../data/edit-ids.txt', 'r') as edit_ids_file:
for id in [x.strip() for x in edit_ids_file.readlines()]:
existing_ids.append(id)
with open('../data/edit-ids.txt', 'r') as edit_ids_file:
for id in [x.strip() for x in edit_ids_file.readlines()]:
existing_ids.append(id)
def set_flair(submission, flair):
if has_write_access and submission.link_flair_text != flair:
@ -84,8 +88,8 @@ def set_flair(submission, flair):
break
else:
continue
if (submission.link_flair_text == "New Entry"):
if submission.link_flair_text == "New Entry" or submission.link_flair_text == "Edit Entry":
try:
@ -102,11 +106,41 @@ def set_flair(submission, flair):
if submission_json:
submission_json_dummy = {"id": submission.id, "submitted_by": ""}
try:
submission_json_dummy["submitted_by"] = submission.author.name
except AttributeError:
submission_json_dummy["submitted_by"] = "unknown"
if submission.link_flair_text == "Edit Entry":
assert submission_json["id"] != 0, "ID is tampered, it must not be 0!"
submission_json_dummy = {"id": submission_json["id"], "edit": True, "contributors": []}
if "submitted_by" in submission_json:
submission_json_dummy["contributors"].append(submission_json['submitted_by'])
del submission_json['submitted_by']
elif "contributors" in submission_json:
submission_json_dummy["contributors"] = submission_json["contributors"]
try:
if not submission.author.name in submission_json_dummy:
submission_json_dummy["contributors"].append(submission.author.name)
except AttributeError:
submission_json_dummy["contributors"].append("unknown")
else:
assert submission_json["id"] == 0, "ID is tampered, it must be 0!"
submission_json_dummy = {"id": submission.id, "contributors": []}
if "submitted_by" in submission_json:
submission_json_dummy["contributors"].append(submission_json['submitted_by'])
del submission_json['submitted_by']
elif "contributors" in submission_json:
submission_json_dummy["contributors"] = submission_json["contributors"]
try:
if not submission.author.name in submission_json_dummy:
submission_json_dummy["contributors"].append(submission.author.name)
except AttributeError:
submission_json_dummy["contributors"].append("unknown")
for key in submission_json:
if not key in submission_json_dummy:
submission_json_dummy[key] = submission_json[key];
@ -116,6 +150,7 @@ def set_flair(submission, flair):
"Submission invalid after validation. This may be caused by not enough points on the path."
outfile.write(json.dumps(submission_json, ensure_ascii=False) + ",\n")
editidsfile.write(submission.id + '\n')
successcount += 1
set_flair(submission, "Processed Entry")

Binary file not shown.

File diff suppressed because one or more lines are too long

11
web/_headers Normal file
View file

@ -0,0 +1,11 @@
/*
Access-Control-Allow-Origin: *
/_img/place/*.png
cache-control: public, max-age=604800
/_img/canvas/*/*.png
cache-control: public, max-age=604800
/_img/canvas/*.png
cache-control: public, max-age=604800

View file

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg version="1.1" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" width="71" height="55" x="0" y="0" viewBox="0 0 71 55">
<g>
<path fill="#FFF" d="M60.1045 4.8978C55.5792 2.8214 50.7265 1.2916 45.6527 0.41542C45.5603 0.39851 45.468 0.440769 45.4204 0.525289C44.7963 1.6353 44.105 3.0834 43.6209 4.2216C38.1637 3.4046 32.7345 3.4046 27.3892 4.2216C26.905 3.0581 26.1886 1.6353 25.5617 0.525289C25.5141 0.443589 25.4218 0.40133 25.3294 0.41542C20.2584 1.2888 15.4057 2.8186 10.8776 4.8978C10.8384 4.9147 10.8048 4.9429 10.7825 4.9795C1.57795 18.7309 -0.943561 32.1443 0.293408 45.3914C0.299005 45.4562 0.335386 45.5182 0.385761 45.5576C6.45866 50.0174 12.3413 52.7249 18.1147 54.5195C18.2071 54.5477 18.305 54.5139 18.3638 54.4378C19.7295 52.5728 20.9469 50.6063 21.9907 48.5383C22.0523 48.4172 21.9935 48.2735 21.8676 48.2256C19.9366 47.4931 18.0979 46.6 16.3292 45.5858C16.1893 45.5041 16.1781 45.304 16.3068 45.2082C16.679 44.9293 17.0513 44.6391 17.4067 44.3461C17.471 44.2926 17.5606 44.2813 17.6362 44.3151C29.2558 49.6202 41.8354 49.6202 53.3179 44.3151C53.3935 44.2785 53.4831 44.2898 53.5502 44.3433C53.9057 44.6363 54.2779 44.9293 54.6529 45.2082C54.7816 45.304 54.7732 45.5041 54.6333 45.5858C52.8646 46.6197 51.0259 47.4931 49.0921 48.2228C48.9662 48.2707 48.9102 48.4172 48.9718 48.5383C50.038 50.6034 51.2554 52.5699 52.5959 54.435C52.6519 54.5139 52.7526 54.5477 52.845 54.5195C58.6464 52.7249 64.529 50.0174 70.6019 45.5576C70.6551 45.5182 70.6887 45.459 70.6943 45.3942C72.1747 30.0791 68.2147 16.7757 60.1968 4.9823C60.1772 4.9429 60.1437 4.9147 60.1045 4.8978ZM23.7259 37.3253C20.2276 37.3253 17.3451 34.1136 17.3451 30.1693C17.3451 26.225 20.1717 23.0133 23.7259 23.0133C27.308 23.0133 30.1626 26.2532 30.1066 30.1693C30.1066 34.1136 27.28 37.3253 23.7259 37.3253ZM47.3178 37.3253C43.8196 37.3253 40.9371 34.1136 40.9371 30.1693C40.9371 26.225 43.7636 23.0133 47.3178 23.0133C50.9 23.0133 53.7545 26.2532 53.6986 30.1693C53.6986 34.1136 50.9 37.3253 47.3178 37.3253Z"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 2.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

View file

@ -1,36 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.1"
width="272.84375"
height="177.59375"
id="svg2">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
transform="translate(-116.4375,-348.78125)"
id="layer1">
<path
d="M 368.07001,380.00645 252.85714,495.16383 137.64427,380.00645"
id="path2985"
style="fill:none;stroke:#ffffff;stroke-width:30;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 1.1 KiB

View file

@ -1,36 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.1"
width="165.15738"
height="260.42575"
id="svg2">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
transform="translate(-170.27845,-307.37227)"
id="layer1">
<path
d="M 310.43583,552.79802 195.27845,437.58514 310.43583,322.37227"
id="path2985"
style="fill:none;stroke:#ffffff;stroke-width:30;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 1.1 KiB

View file

@ -1,36 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.1"
width="165.15738"
height="260.42575"
id="svg2">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
transform="translate(-170.27845,-307.37227)"
id="layer1">
<path
d="M 195.27845,552.79802 310.43583,437.58514 195.27845,322.37227"
id="path2985"
style="fill:none;stroke:#ffffff;stroke-width:30;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 1.1 KiB

View file

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.1"
width="15.26367"
height="15.3125"
id="svg2">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
transform="translate(-188.36328,-392.51062)"
id="layer1">
<g
id="text2985"
style="font-size:20px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Open Sans;-inkscape-font-specification:Open Sans">
<path
d="m 188.36328,392.51062 15.26367,0 -7.63672,15.3125 -7.62695,-15.3125"
id="path2990"
style="fill:#ffffff;fill-opacity:0.63274339" />
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 1.3 KiB

View file

@ -1,36 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.1"
width="63.97776"
height="63.985764"
id="svg2">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
transform="translate(-239.44047,-363.22698)"
id="layer1">
<path
d="m 290.96332,384.19885 c -0.59424,-6.27896 -6.01049,-8.39164 -12.8518,-9.00126 l 0.0128,-8.70824 -5.30062,-0.007 -0.0123,8.47872 c -1.39346,-0.002 -2.81788,0.0235 -4.23308,0.0496 l 0.0135,-8.53485 -5.29778,-0.008 -0.0149,8.70575 c -1.14765,0.0217 -2.27469,0.0414 -3.3736,0.0405 l -5.3e-4,-0.0271 -7.31086,-0.0134 -0.007,5.66124 c 0,0 3.91442,-0.0688 3.84925,0.003 2.14703,0.004 2.84397,1.25133 3.04562,2.32724 l -0.0143,9.92049 c 0.14815,0.001 0.34143,0.008 0.56022,0.0378 -0.17659,-3e-5 -0.36561,-0.002 -0.55995,-1.2e-4 l -0.022,13.89765 c -0.0952,0.67556 -0.49402,1.75232 -1.99407,1.75269 0.0678,0.0602 -3.85316,-0.007 -3.85316,-0.007 l -1.06259,6.32874 6.89801,0.01 c 1.28367,0.003 2.54669,0.0271 3.78665,0.0368 l -0.0109,8.80736 5.29494,0.009 0.0128,-8.71427 c 1.45379,0.0322 2.86073,0.0459 4.23429,0.0466 l -0.0152,8.67412 5.30061,0.007 0.0147,-8.79173 c 8.91219,-0.49805 15.1527,-2.73385 15.93965,-11.10113 0.63533,-6.73752 -2.53,-9.74895 -7.5868,-10.97123 3.07554,-1.56033 5.00104,-4.31475 4.55848,-8.90928 z m -7.44387,18.82121 c -0.007,6.58182 -11.27551,5.81643 -14.86789,5.81449 l 0.0187,-11.66923 c 3.59343,0.006 14.85983,-1.01023 14.84919,5.85474 z m -2.44078,-16.46738 c -0.0106,5.98811 -9.40836,5.27536 -12.40064,5.27116 l 0.016,-10.58348 c 2.99229,0.004 12.39322,-0.93337 12.38461,5.31232 z"
id="path3010"
style="fill:#ffffff" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 532 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 221 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 227 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 153 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 274 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 138 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 172 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 183 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 189 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 158 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 146 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 116 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 95 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 206 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 123 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 131 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 136 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 130 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 113 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 254 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 122 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 136 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 148 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 152 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 149 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 108 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 244 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 79 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 128 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 129 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 113 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 102 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 242 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 156 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 345 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 368 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 352 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 307 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 229 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 551 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 221 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 285 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 321 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 342 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 367 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 307 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 277 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 244 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 200 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 140 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 566 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 144 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 204 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 243 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 285 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 295 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 289 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 259 KiB

Some files were not shown because too many files have changed in this diff Show more