From d4018bcc3119c02d0b1f145d6b1fc6b8865279d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Mon, 4 Nov 2024 09:44:25 +0100 Subject: [PATCH 01/12] feat: add JSON endpoint for items and update metadata configuration --- .github/workflows/process_data.py | 290 +++++++++++++++--------------- _config.yml | 3 +- _data/config-metadata.csv | 2 - utilities/items.json | 7 + 4 files changed, 153 insertions(+), 149 deletions(-) create mode 100644 utilities/items.json diff --git a/.github/workflows/process_data.py b/.github/workflows/process_data.py index 577912d6..13b30b86 100644 --- a/.github/workflows/process_data.py +++ b/.github/workflows/process_data.py @@ -1,3 +1,4 @@ +import json import logging import os from urllib.parse import urljoin, urlparse @@ -16,49 +17,18 @@ level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" ) - -# Function to get items from a collection -def get_items_from_collection(collection_id): - url = urljoin(OMEKA_API_URL, "items") - all_items = [] - params = { - "item_set_id": collection_id, - "key_identity": KEY_IDENTITY, - "key_credential": KEY_CREDENTIAL, - "per_page": 100, - } - - while True: - response = requests.get(url, params=params) - if response.status_code != 200: - logging.error(f"Error: {response.status_code}") - break - items = response.json() - all_items.extend(items) - next_url = None - for link in response.links: - if response.links[link]["rel"] == "next": - next_url = response.links[link]["url"] - break - if not next_url: - break - url = next_url - return all_items - - -# Function to get media for an item -def get_media(item_id): - url = urljoin(OMEKA_API_URL, f"media?item_id={item_id}") - params = {"key_identity": KEY_IDENTITY, "key_credential": KEY_CREDENTIAL} - response = requests.get(url, params=params) - if response.status_code != 200: - logging.error(f"Error: {response.status_code}") - return None - return response.json() +# --- Helper Functions for Data Extraction --- +def is_valid_url(url): + """Checks if a URL is valid.""" + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False -# Function to download file def download_file(url, dest_path): + """Downloads a file from a given URL to the specified destination path.""" os.makedirs(os.path.dirname(dest_path), exist_ok=True) try: with requests.get(url, stream=True) as r: @@ -66,45 +36,59 @@ def download_file(url, dest_path): with open(dest_path, "wb") as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) - except requests.exceptions.HTTPError as http_err: - logging.error(f"HTTP error occurred: {http_err}") - except Exception as err: - logging.error(f"Other error occurred: {err}") + except requests.exceptions.RequestException as err: + logging.error(f"File download error: {err}") + + +def get_paginated_items(url, params): + """Fetches all items from a paginated API endpoint.""" + items = [] + while url: + try: + response = requests.get(url, params=params) + response.raise_for_status() + except requests.exceptions.RequestException as err: + logging.error(f"Error fetching items: {err}") + break + items.extend(response.json()) + url = response.links.get("next", {}).get("url") + params = None + return items -# Function to check if URL is valid -def is_valid_url(url): - try: - result = urlparse(url) - return all([result.scheme, result.netloc]) - except ValueError: - return False + +def get_items_from_collection(collection_id): + """Fetches all items from a specified collection.""" + params = { + 
"item_set_id": collection_id, + "key_identity": KEY_IDENTITY, + "key_credential": KEY_CREDENTIAL, + "per_page": 100, + } + return get_paginated_items(urljoin(OMEKA_API_URL, "items"), params) -# Helper functions to extract data -def extract_prop_value(props, prop_id): - return next( - ( - prop.get("@value", "") - for prop in props - if prop.get("property_id") == prop_id - ), - "", +def get_media(item_id): + """Fetches media associated with a specific item ID.""" + params = {"key_identity": KEY_IDENTITY, "key_credential": KEY_CREDENTIAL} + return get_paginated_items( + urljoin(OMEKA_API_URL, f"media?item_id={item_id}"), params ) -def extract_prop_uri(props, prop_id): - return next( - ( - f"[{prop.get('o:label', '')}]({prop.get('@id', '')})" - for prop in props - if prop.get("property_id") == prop_id - ), - "", - ) +# --- Data Extraction and Transformation Functions --- +def extract_property(props, prop_id, as_uri=False): + """Extracts a property value or URI from properties based on property ID.""" + for prop in props: + if prop.get("property_id") == prop_id: + if as_uri: + return f"[{prop.get('o:label', '')}]({prop.get('@id', '')})" + return prop.get("@value", "") + return "" -def extract_combined_list(props): +def extract_combined_values(props): + """Combines text values and URIs from properties into a single list.""" values = [ prop.get("@value", "").replace(";", ";") for prop in props @@ -115,50 +99,28 @@ def extract_combined_list(props): for prop in props if "@id" in prop ] - combined = values + uris + return values + uris + + +def extract_combined_values_csv(props): + """Combines text values and URIs into a semicolon-separated string.""" + combined = extract_combined_values(props) return ";".join(combined) -def extract_item_data(item): - # Download the thumbnail image if available and valid - image_url = item.get("thumbnail_display_urls", {}).get("large", "") - local_image_path = "" +def download_thumbnail(image_url): + """Downloads the thumbnail image if the URL is valid.""" if image_url and is_valid_url(image_url): filename = os.path.basename(image_url) local_image_path = f"objects/{filename}" if not os.path.exists(local_image_path): download_file(image_url, local_image_path) - - logging.info(f"Item ID: {item['o:id']}") - - return { - "objectid": extract_prop_value(item.get("dcterms:identifier", []), 10), - "parentid": "", - "title": extract_prop_value(item.get("dcterms:title", []), 1), - "description": extract_prop_value(item.get("dcterms:description", []), 4), - "subject": extract_combined_list(item.get("dcterms:subject", [])), - "era": extract_prop_value(item.get("dcterms:temporal", []), 41), - "isPartOf": extract_combined_list(item.get("dcterms:isPartOf", [])), - "creator": extract_combined_list(item.get("dcterms:creator", [])), - "publisher": extract_combined_list(item.get("dcterms:publisher", [])), - "source": extract_combined_list(item.get("dcterms:source", [])), - "date": extract_prop_value(item.get("dcterms:date", []), 7), - "type": extract_prop_uri(item.get("dcterms:type", []), 8), - "format": extract_prop_value(item.get("dcterms:format", []), 9), - "extent": extract_prop_value(item.get("dcterms:extent", []), 25), - "language": extract_prop_value(item.get("dcterms:language", []), 12), - "relation": extract_combined_list(item.get("dcterms:relation", [])), - "rights": extract_prop_value(item.get("dcterms:rights", []), 15), - "license": extract_prop_value(item.get("dcterms:license", []), 49), - "display_template": "compound_object", - "object_location": "", - "image_small": 
local_image_path, - "image_thumb": local_image_path, - "image_alt_text": item.get("o:alt_text", ""), - } + return local_image_path + return "" def infer_display_template(format_value): + """Infers the display template type based on the format value.""" if "image" in format_value.lower(): return "image" elif "pdf" in format_value.lower(): @@ -169,18 +131,48 @@ def infer_display_template(format_value): return "record" +def extract_item_data(item): + """Extracts relevant data from an item and downloads its thumbnail if available.""" + local_image_path = download_thumbnail( + item.get("thumbnail_display_urls", {}).get("large", "") + ) + + return { + "objectid": extract_property(item.get("dcterms:identifier", []), 10), + "parentid": "", + "title": extract_property(item.get("dcterms:title", []), 1), + "description": extract_property(item.get("dcterms:description", []), 4), + "subject": extract_combined_values(item.get("dcterms:subject", [])), + "era": extract_property(item.get("dcterms:temporal", []), 41), + "isPartOf": extract_combined_values(item.get("dcterms:isPartOf", [])), + "creator": extract_combined_values(item.get("dcterms:creator", [])), + "publisher": extract_combined_values(item.get("dcterms:publisher", [])), + "source": extract_combined_values(item.get("dcterms:source", [])), + "date": extract_property(item.get("dcterms:date", []), 7), + "type": extract_property(item.get("dcterms:type", []), 8, as_uri=True), + "format": extract_property(item.get("dcterms:format", []), 9), + "extent": extract_property(item.get("dcterms:extent", []), 25), + "language": extract_property(item.get("dcterms:language", []), 12), + "relation": extract_combined_values(item.get("dcterms:relation", [])), + "rights": extract_property(item.get("dcterms:rights", []), 15), + "license": extract_property(item.get("dcterms:license", []), 49), + "display_template": "compound_object", + "object_location": "", + "image_small": local_image_path, + "image_thumb": local_image_path, + "image_alt_text": item.get("o:alt_text", ""), + } + + def extract_media_data(media, item_dc_identifier): - format_value = extract_prop_value(media.get("dcterms:format", []), 9) + """Extracts relevant data from a media item associated with a specific item.""" + format_value = extract_property(media.get("dcterms:format", []), 9) display_template = infer_display_template(format_value) # Download the thumbnail image if available and valid - image_url = media.get("thumbnail_display_urls", {}).get("large", "") - local_image_path = "" - if image_url and is_valid_url(image_url): - filename = os.path.basename(image_url) - local_image_path = f"objects/{filename}" - if not os.path.exists(local_image_path): - download_file(image_url, local_image_path) + local_image_path = download_thumbnail( + media.get("thumbnail_display_urls", {}).get("large", "") + ) # Extract media data object_location = ( @@ -191,24 +183,24 @@ def extract_media_data(media, item_dc_identifier): logging.info(f"is_public: {media.get('o:is_public')}") return { - "objectid": extract_prop_value(media.get("dcterms:identifier", []), 10), + "objectid": extract_property(media.get("dcterms:identifier", []), 10), "parentid": item_dc_identifier, - "title": extract_prop_value(media.get("dcterms:title", []), 1), - "description": extract_prop_value(media.get("dcterms:description", []), 4), - "subject": extract_combined_list(media.get("dcterms:subject", [])), - "era": extract_prop_value(media.get("dcterms:temporal", []), 41), - "isPartOf": extract_combined_list(media.get("dcterms:isPartOf", [])), - 
"creator": extract_combined_list(media.get("dcterms:creator", [])), - "publisher": extract_combined_list(media.get("dcterms:publisher", [])), - "source": extract_combined_list(media.get("dcterms:source", [])), - "date": extract_prop_value(media.get("dcterms:date", []), 7), - "type": extract_prop_uri(media.get("dcterms:type", []), 8), + "title": extract_property(media.get("dcterms:title", []), 1), + "description": extract_property(media.get("dcterms:description", []), 4), + "subject": extract_combined_values(media.get("dcterms:subject", [])), + "era": extract_property(media.get("dcterms:temporal", []), 41), + "isPartOf": extract_combined_values(media.get("dcterms:isPartOf", [])), + "creator": extract_combined_values(media.get("dcterms:creator", [])), + "publisher": extract_combined_values(media.get("dcterms:publisher", [])), + "source": extract_combined_values(media.get("dcterms:source", [])), + "date": extract_property(media.get("dcterms:date", []), 7), + "type": extract_property(media.get("dcterms:type", []), 8, as_uri=True), "format": format_value, - "extent": extract_prop_value(media.get("dcterms:extent", []), 25), - "language": extract_prop_value(media.get("dcterms:language", []), 12), - "relation": extract_combined_list(media.get("dcterms:relation", [])), - "rights": extract_prop_value(media.get("dcterms:rights", []), 15), - "license": extract_prop_value(media.get("dcterms:license", []), 49), + "extent": extract_property(media.get("dcterms:extent", []), 25), + "language": extract_property(media.get("dcterms:language", []), 12), + "relation": extract_combined_values(media.get("dcterms:relation", [])), + "rights": extract_property(media.get("dcterms:rights", []), 15), + "license": extract_property(media.get("dcterms:license", []), 49), "display_template": display_template, "object_location": object_location, "image_small": local_image_path, @@ -217,37 +209,43 @@ def extract_media_data(media, item_dc_identifier): } -# Main function to download item set and generate CSV +# --- Main Processing Function --- def main(): - # Download item set - collection_id = ITEM_SET_ID - items_data = get_items_from_collection(collection_id) + # Fetch item data + items_data = get_items_from_collection(ITEM_SET_ID) - # Extract item data - item_records = [] - media_records = [] + # Process each item and associated media + item_records, media_records = [], [] for item in items_data: item_record = extract_item_data(item) item_records.append(item_record) - - # Extract media data for each item - item_dc_identifier = item_record["objectid"] + media_data = get_media(item.get("o:id", "")) + # Process media data for each item media_data = get_media(item["o:id"]) if media_data: for media in media_data: - media_records.append(extract_media_data(media, item_dc_identifier)) + media_records.append(extract_media_data(media, item_record["objectid"])) - # Combine item and media records - combined_records = item_records + media_records + # Save data to CSV and JSON formats + save_to_files(item_records + media_records, CSV_PATH, JSON_PATH) - # Create DataFrame - df = pd.DataFrame(combined_records) - # Save to CSV - csv_path = "_data/sgb-metadata.csv" - os.makedirs(os.path.dirname(csv_path), exist_ok=True) - df.to_csv(csv_path, index=False) +def save_to_files(records, csv_path, json_path): + """Saves data to both CSV and JSON files.""" + with open(json_path, "w", encoding="utf-8") as f: + json.dump(records, f, ensure_ascii=False) + logging.info(f"JSON file has been saved to {json_path}") + # Convert list of records to a 
DataFrame and save as CSV + records = [ + { + key: ";".join(value) if isinstance(value, list) else value + for key, value in record.items() + } + for record in records + ] + df = pd.DataFrame(records) + df.to_csv(csv_path, index=False) logging.info(f"CSV file has been saved to {csv_path}") diff --git a/_config.yml b/_config.yml index 4c70d549..58376bfb 100644 --- a/_config.yml +++ b/_config.yml @@ -36,7 +36,8 @@ lang: de # # Set the metadata for your collection (the name of the CSV file in your _data directory that describes the objects in your collection) # Use the filename of your CSV **without** the ".csv" extension! E.g. _data/demo-metadata.csv --> "demo-metadata" -metadata: sgb-metadata +metadata: sgb-metadata-csv +metadata-json: sgb-metadata-json # page generation settings [optional!] # [optional: only used if you need to tweak CB defaults or generate from more than one data file] # page_gen: diff --git a/_data/config-metadata.csv b/_data/config-metadata.csv index 4a22a693..073cbe3d 100644 --- a/_data/config-metadata.csv +++ b/_data/config-metadata.csv @@ -16,5 +16,3 @@ language,Sprache,true,,DCTERMS.language,inLanguage rights,Rechte,,,DCTERMS.rights,usageInfo license,Lizenz,,true,DCTERMS.license,license relation,Verwandte Ressourcen,,true,DCTERMS.relation,relation - - diff --git a/utilities/items.json b/utilities/items.json new file mode 100644 index 00000000..93b3165d --- /dev/null +++ b/utilities/items.json @@ -0,0 +1,7 @@ +--- +layout: null +permalink: /items.json +--- +{%- assign items = site.data[site.metadata-json] -%} + +[{% for item in items %}{{ item | jsonify }}{% unless forloop.last %},{% endunless %}{% endfor %}] From 36bcbd5234a88b696dae27cdc9b49011c79649e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 10:52:22 +0100 Subject: [PATCH 02/12] feat: update metadata configuration and enhance data processing scripts --- .github/workflows/process_data.py | 12 ++++++++++++ _data/config-metadata.csv | 2 +- _data/theme.yml | 4 ++-- assets/data/metadata.csv | 2 +- assets/data/metadata.json | 6 +++--- package.json | 4 +++- utilities/items.json | 7 ------- 7 files changed, 22 insertions(+), 15 deletions(-) delete mode 100644 utilities/items.json diff --git a/.github/workflows/process_data.py b/.github/workflows/process_data.py index 13b30b86..2825b1d7 100644 --- a/.github/workflows/process_data.py +++ b/.github/workflows/process_data.py @@ -6,17 +6,29 @@ import pandas as pd import requests +# TODO https://chatgpt.com/share/671621c7-3c20-8011-8956-9bd9248a1fa2 +# TODO https://omeka.unibe.ch/omeka-new/admin/item?item_set_id=6508 + # Configuration OMEKA_API_URL = os.getenv("OMEKA_API_URL") KEY_IDENTITY = os.getenv("KEY_IDENTITY") KEY_CREDENTIAL = os.getenv("KEY_CREDENTIAL") ITEM_SET_ID = os.getenv("ITEM_SET_ID") +CSV_PATH = os.getenv("CSV_PATH", "_data/sgb-metadata-csv.csv") +JSON_PATH = os.getenv("JSON_PATH", "_data/sgb-metadata-json.json") + +OMEKA_API_URL = "https://omeka.unibe.ch/api/" +# OMEKA_API_URL = "https://omeka.unibe.ch/omeka-new/api/" +KEY_IDENTITY = "sEJ4wdtCBpSkCzSPBpluzoqBzmkRCvw7" +KEY_CREDENTIAL = "0v8AZu265B8EKkCCYOEkKQeOGBieejmy" +ITEM_SET_ID = 10780 # Set up logging logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" ) + # --- Helper Functions for Data Extraction --- def is_valid_url(url): """Checks if a URL is valid.""" diff --git a/_data/config-metadata.csv b/_data/config-metadata.csv index 073cbe3d..34c1afe2 100644 --- a/_data/config-metadata.csv +++ b/_data/config-metadata.csv @@ 
-2,6 +2,7 @@ field,display_name,browse_link,external_link,dc_map,schema_map objectid,Identifikator,,,DCTERMS.identifier,identifier title,Titel,,,DCTERMS.title,headline description,Beschreibung,,,DCTERMS.description,description +relation,Verwandte Ressourcen,,true,DCTERMS.relation,relation subject,Thema,true,,DCTERMS.subject,keywords era,Epoche,true,,DCTERMS.temporal, isPartOf,Gehört zu,,true,DCTERMS.isPartOf,isPartOf @@ -15,4 +16,3 @@ extent,Auflösung,,,DCTERMS.extent, language,Sprache,true,,DCTERMS.language,inLanguage rights,Rechte,,,DCTERMS.rights,usageInfo license,Lizenz,,true,DCTERMS.license,license -relation,Verwandte Ressourcen,,true,DCTERMS.relation,relation diff --git a/_data/theme.yml b/_data/theme.yml index b4a4c27a..50826a49 100644 --- a/_data/theme.yml +++ b/_data/theme.yml @@ -68,9 +68,9 @@ year-nav-increment: 1 # set increments to auto gen nav years # # add metadata fields for export in data downloads (tip: paste in first row of csv) # comma delimited list, reference url is automatic -metadata-export-fields: "title,creator,date,description,subject,source,identifier,type,format,language,rights,rightsstatement" +metadata-export-fields: "objectid,parentid,title,description,relation,subject,era,isPartOf,creator,publisher,source,date,type,format,extent,language,rights,license" # generate a facets list for given fields, comma delimited -metadata-facets-fields: "subject,creator,format" +metadata-facets-fields: "subject,creator,publisher,type,format,language,license" ########## # Compound Objects diff --git a/assets/data/metadata.csv b/assets/data/metadata.csv index 6572fb79..dd70fedb 100644 --- a/assets/data/metadata.csv +++ b/assets/data/metadata.csv @@ -4,5 +4,5 @@ {%- assign items = site.data[site.metadata] | where_exp: 'item','item.objectid' -%} {%- assign fields = site.data.theme.metadata-export-fields | split: "," -%} {{ fields | join: "," }},object_thumb,object_location,reference_url -{% for item in items %}{% for f in fields %}"{{ item[f] | escape }}",{% endfor %}"{{ item.image_thumb | absolute_url }}","{{ item.object_location | absolute_url }}",{{ '/items/' | relative_url }}{% if item.parentid %}{{ item.parentid }}.html#{{ item.objectid }}{% else %}{{ item.objectid }}.html{% endif %} +{% for item in items %}{% for f in fields %}"{{ item[f] | escape }}",{% endfor %}"{{ item.image_thumb | absolute_url }}","{{ item.object_location | absolute_url }}",{{ '/items/' | absolute_url }}{% if item.parentid %}{{ item.parentid }}.html#{{ item.objectid }}{% else %}{{ item.objectid }}.html{% endif %} {% endfor %} diff --git a/assets/data/metadata.json b/assets/data/metadata.json index 01f20814..24728cd8 100644 --- a/assets/data/metadata.json +++ b/assets/data/metadata.json @@ -1,7 +1,7 @@ --- # metadata for json export --- -{%- assign items = site.data[site.metadata] | where_exp: 'item','item.objectid' -%} +{%- assign items = site.data[site.metadata-json] -%} {%- assign fields = site.data.theme.metadata-export-fields | split: "," -%} { "objects": [ {%- for item in items -%} @@ -9,7 +9,7 @@ {% for f in fields %}{% if item[f] %}{{ f | jsonify }}: {{ item[f] | jsonify }},{% endif %} {% endfor %}{% if item.image_thumb %}"object_thumb": "{{ item.image_thumb | absolute_url }}",{% endif %} "object_location": "{{ item.object_location | absolute_url }}", - "reference_url": "{{ '/items/' | relative_url }}{% if item.parentid %}{{ item.parentid }}.html#{{ item.objectid }}{% else %}{{ item.objectid }}.html{% endif %}" + "reference_url": "{{ '/items/' | absolute_url }}{% if item.parentid.size > 0 %}{{ 
item.parentid }}.html#{{ item.objectid }}{% else %}{{ item.objectid }}.html{% endif %}" }{% unless forloop.last %},{% endunless %} {% endfor %} -] } +] } \ No newline at end of file diff --git a/package.json b/package.json index 75ed5140..97d985c9 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,9 @@ { "scripts": { "check": "prettier --check '**/*.{js,css,md,html}'", - "format": "prettier --write '**/*.{js,css,md,html}'" + "format": "prettier --write '**/*.{js,css,md,html}'", + "clean": "rm _data/sgb-metadata-* && find objects -type f ! -name 'README.md' -delete", + "populate": "uv run .github/workflows/process_data.py" }, "dependencies": { "@shopify/prettier-plugin-liquid": "^1.5.2", diff --git a/utilities/items.json b/utilities/items.json deleted file mode 100644 index 93b3165d..00000000 --- a/utilities/items.json +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: null -permalink: /items.json ---- -{%- assign items = site.data[site.metadata-json] -%} - -[{% for item in items %}{{ item | jsonify }}{% unless forloop.last %},{% endunless %}{% endfor %}] From c272ba3dcf463976a0d83f5161af5e763c2d194a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 10:52:49 +0100 Subject: [PATCH 03/12] refactor: optimize item filtering and improve loading icon handling --- _includes/js/browse-js.html | 57 +++++++++++++------------------------ 1 file changed, 19 insertions(+), 38 deletions(-) diff --git a/_includes/js/browse-js.html b/_includes/js/browse-js.html index 67b86624..d3edb235 100644 --- a/_includes/js/browse-js.html +++ b/_includes/js/browse-js.html @@ -84,55 +84,36 @@ } /* filter items function */ -function filterItems(arr,q) { - // show loading icon +function filterItems(arr, q) { + // Show loading icon loadingIcon.classList.remove("d-none"); - // dont filter if no q - if (q=="") { - var filteredItems = arr; - } else { - q = q.trim().toUpperCase(); - // js indexOf filter - var filteredItems = []; - for (var i = 0, len = arr.length; i < len; i++) { - var val = ""; - for (var k in arr[i]) { val += arr[i][k] + " "; } - if(val.toUpperCase().indexOf(q) != -1){ - filteredItems.push(arr[i]); - } - } - } - // add number - document.querySelector("#numberOf").innerHTML = filteredItems.length + " / {{ items | size }}"; - - // add stuff, make cards first in giant var, then add all at once to speed things up - var cards = ""; - for (var i = 0, len = filteredItems.length; i < len; i++) { - cards += makeCard(filteredItems[i]); - } + + // Trim and convert query to uppercase + const query = q.trim().toUpperCase(); + + // Filter items based on query + const filteredItems = query === "" ? 
arr : arr.filter(item => { + const itemValues = Object.values(item).join(" ").toUpperCase(); + return itemValues.includes(query); + }); + + // Update number of filtered items + document.querySelector("#numberOf").innerHTML = `${filteredItems.length} / {{ items | size }}`; + + // Generate and display cards for filtered items + const cards = filteredItems.map(makeCard).join(""); browseItemsDiv.innerHTML = cards; - // finish + // Finish filterTextBox.focus(); loadingIcon.classList.add("d-none"); }; -/* Fisher-Yates shuffle https://bost.ocks.org/mike/shuffle/ */ -function shuffle(array) { - var m = array.length, t, i; - while (m) { - i = Math.floor(Math.random() * m--); - t = array[m]; - array[m] = array[i]; - array[i] = t; - } - return array; -} /* init browse page */ /* randomize items once at page load */ -shuffle(items); +items = items.sort(() => Math.random() - 0.5); /* set some elements */ var loadingIcon = document.querySelector("#loadingIcon"); From c35f4482d85597020c858609d7e9e2c54231cb4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 11:19:01 +0100 Subject: [PATCH 04/12] chore: remove unused API configuration and clean up comments in process_data.py --- .github/workflows/process_data.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.github/workflows/process_data.py b/.github/workflows/process_data.py index 2825b1d7..8493a7a1 100644 --- a/.github/workflows/process_data.py +++ b/.github/workflows/process_data.py @@ -6,9 +6,6 @@ import pandas as pd import requests -# TODO https://chatgpt.com/share/671621c7-3c20-8011-8956-9bd9248a1fa2 -# TODO https://omeka.unibe.ch/omeka-new/admin/item?item_set_id=6508 - # Configuration OMEKA_API_URL = os.getenv("OMEKA_API_URL") KEY_IDENTITY = os.getenv("KEY_IDENTITY") @@ -17,12 +14,6 @@ CSV_PATH = os.getenv("CSV_PATH", "_data/sgb-metadata-csv.csv") JSON_PATH = os.getenv("JSON_PATH", "_data/sgb-metadata-json.json") -OMEKA_API_URL = "https://omeka.unibe.ch/api/" -# OMEKA_API_URL = "https://omeka.unibe.ch/omeka-new/api/" -KEY_IDENTITY = "sEJ4wdtCBpSkCzSPBpluzoqBzmkRCvw7" -KEY_CREDENTIAL = "0v8AZu265B8EKkCCYOEkKQeOGBieejmy" -ITEM_SET_ID = 10780 - # Set up logging logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" From 905ed7e2696309e13e9ade232332964d012dcd1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 11:28:48 +0100 Subject: [PATCH 05/12] ci: update artifact names for metadata CSV and JSON uploads in Jekyll workflow --- .github/workflows/jekyll.yml | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/.github/workflows/jekyll.yml b/.github/workflows/jekyll.yml index 801bb10e..e7edf25b 100644 --- a/.github/workflows/jekyll.yml +++ b/.github/workflows/jekyll.yml @@ -35,11 +35,16 @@ jobs: with: python-version: '3.13' - run: uv run .github/workflows/process_data.py - - name: Upload sgb-metadata.csv + - name: Upload sgb-metadata-csv.csv uses: actions/upload-artifact@v4 with: - name: sgb-metadata - path: _data/sgb-metadata.csv + name: sgb-metadata-csv + path: _data/sgb-metadata-csv.csv + - name: Upload sgb-metadata-json.json + uses: actions/upload-artifact@v4 + with: + name: sgb-metadata-json + path: _data/sgb-metadata-json.json - name: Upload objects folder uses: actions/upload-artifact@v4 with: @@ -52,11 +57,15 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - - name: Download sgb-metadata.csv + - name: Download sgb-metadata-csv.csv uses: actions/download-artifact@v4 with: - name: 
sgb-metadata + name: sgb-metadata-csv path: _data + - name: Download sgb-metadata-json.json + uses: actions/download-artifact@v4 + with: + name: sgb-metadata-json - name: Download objects folder uses: actions/download-artifact@v4 with: From a0d8789a75395bb6c724bfa8fbb2f89e29ee128a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 11:35:08 +0100 Subject: [PATCH 06/12] feat: validate ITEM_SET_ID environment variable and improve error handling in process_data.py --- .github/workflows/process_data.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/process_data.py b/.github/workflows/process_data.py index 8493a7a1..7d00adc6 100644 --- a/.github/workflows/process_data.py +++ b/.github/workflows/process_data.py @@ -11,6 +11,12 @@ KEY_IDENTITY = os.getenv("KEY_IDENTITY") KEY_CREDENTIAL = os.getenv("KEY_CREDENTIAL") ITEM_SET_ID = os.getenv("ITEM_SET_ID") +if not ITEM_SET_ID: + raise ValueError("ITEM_SET_ID environment variable must be set") +try: + ITEM_SET_ID = int(ITEM_SET_ID) +except ValueError: + raise ValueError("ITEM_SET_ID must be a valid integer") CSV_PATH = os.getenv("CSV_PATH", "_data/sgb-metadata-csv.csv") JSON_PATH = os.getenv("JSON_PATH", "_data/sgb-metadata-json.json") @@ -41,6 +47,7 @@ def download_file(url, dest_path): f.write(chunk) except requests.exceptions.RequestException as err: logging.error(f"File download error: {err}") + raise def get_paginated_items(url, params): @@ -223,8 +230,6 @@ def main(): item_record = extract_item_data(item) item_records.append(item_record) media_data = get_media(item.get("o:id", "")) - # Process media data for each item - media_data = get_media(item["o:id"]) if media_data: for media in media_data: media_records.append(extract_media_data(media, item_record["objectid"])) From 807502b55ba7d68cc6ff21f8b5c36a3f0c925a2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 11:35:43 +0100 Subject: [PATCH 07/12] ci: specify download path for metadata JSON artifact in Jekyll workflow --- .github/workflows/jekyll.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/jekyll.yml b/.github/workflows/jekyll.yml index e7edf25b..29ce61fc 100644 --- a/.github/workflows/jekyll.yml +++ b/.github/workflows/jekyll.yml @@ -66,6 +66,7 @@ jobs: uses: actions/download-artifact@v4 with: name: sgb-metadata-json + path: _data - name: Download objects folder uses: actions/download-artifact@v4 with: From 725018d34bec85deeb21b91bd8b4eb5d693b1785 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Sun, 10 Nov 2024 11:51:50 +0100 Subject: [PATCH 08/12] feat: refactoring --- _includes/js/browse-js.html | 260 +++++++++++++++++------------------- 1 file changed, 125 insertions(+), 135 deletions(-) diff --git a/_includes/js/browse-js.html b/_includes/js/browse-js.html index d3edb235..08819f4f 100644 --- a/_includes/js/browse-js.html +++ b/_includes/js/browse-js.html @@ -5,189 +5,179 @@ {% endif %} {%- assign fields = site.data['config-browse'] -%} From f35e89c6f5fc209e24e17d4caee83632b57d2842 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Tue, 12 Nov 2024 16:57:23 +0100 Subject: [PATCH 09/12] fix: provide default thumbnail path when downloading images in process_data.py --- .github/workflows/process_data.py | 8 ++------ assets/img/no-image.png | Bin 0 -> 3319 bytes 2 files changed, 2 insertions(+), 6 deletions(-) create mode 100644 assets/img/no-image.png diff --git a/.github/workflows/process_data.py 
b/.github/workflows/process_data.py
index 7d00adc6..27aca598 100644
--- a/.github/workflows/process_data.py
+++ b/.github/workflows/process_data.py
@@ -143,9 +143,7 @@ def infer_display_template(format_value):
 
 def extract_item_data(item):
     """Extracts relevant data from an item and downloads its thumbnail if available."""
-    local_image_path = download_thumbnail(
-        item.get("thumbnail_display_urls", {}).get("large", "")
-    )
+    local_image_path = download_thumbnail(item.get("thumbnail_display_urls", {}).get("large", "")) or "assets/img/no-image.png"
 
     return {
         "objectid": extract_property(item.get("dcterms:identifier", []), 10),
@@ -180,9 +178,7 @@ def extract_media_data(media, item_dc_identifier):
     display_template = infer_display_template(format_value)
 
     # Download the thumbnail image if available and valid
-    local_image_path = download_thumbnail(
-        media.get("thumbnail_display_urls", {}).get("large", "")
-    )
+    local_image_path = download_thumbnail(media.get("thumbnail_display_urls", {}).get("large", "")) or "assets/img/no-image.png"
 
     # Extract media data
     object_location = (
diff --git a/assets/img/no-image.png b/assets/img/no-image.png
new file mode 100644
index 0000000000000000000000000000000000000000..274f85d0027fa2ccc57d766bbaab6b1e1c593e56
GIT binary patch
literal 3319
[3319 bytes of base85-encoded PNG data omitted]

Date: Wed, 13 Nov 2024 09:21:18 +0100
Subject: [PATCH 10/12] chore: update @shopify/prettier-plugin-liquid to 
version 1.6.0 in package.json --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 97d985c9..d796b125 100644 --- a/package.json +++ b/package.json @@ -6,7 +6,7 @@ "populate": "uv run .github/workflows/process_data.py" }, "dependencies": { - "@shopify/prettier-plugin-liquid": "^1.5.2", + "@shopify/prettier-plugin-liquid": "^1.6.0", "prettier": "^3.3.3" } } From 14d188250bc7e10898f966c94cafecdf66170402 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Wed, 13 Nov 2024 10:51:02 +0100 Subject: [PATCH 11/12] fix: update thumbnail download logic and upgrade dependencies in package-lock.json --- .github/workflows/process_data.py | 9 +++++++-- package-lock.json | 16 ++++++++-------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/.github/workflows/process_data.py b/.github/workflows/process_data.py index 27aca598..4c46989b 100644 --- a/.github/workflows/process_data.py +++ b/.github/workflows/process_data.py @@ -143,7 +143,10 @@ def infer_display_template(format_value): def extract_item_data(item): """Extracts relevant data from an item and downloads its thumbnail if available.""" - local_image_path = download_thumbnail(item.get("thumbnail_display_urls", {}).get("large", "")) or "assets/img/no-image.png" + local_image_path = ( + download_thumbnail(item.get("thumbnail_display_urls", {}).get("large", "")) + or "assets/img/no-image.png" + ) return { "objectid": extract_property(item.get("dcterms:identifier", []), 10), @@ -178,7 +181,9 @@ def extract_media_data(media, item_dc_identifier): display_template = infer_display_template(format_value) # Download the thumbnail image if available and valid - local_image_path = download_thumbnail(media.get("thumbnail_display_urls", {}).get("large", "")) or "assets/img/no-image.png" + local_image_path = download_thumbnail( + media.get("thumbnail_display_urls", {}).get("large", "") + ) # Extract media data object_location = ( diff --git a/package-lock.json b/package-lock.json index f350c073..3fb998e4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -5,14 +5,14 @@ "packages": { "": { "dependencies": { - "@shopify/prettier-plugin-liquid": "^1.5.2", + "@shopify/prettier-plugin-liquid": "^1.6.0", "prettier": "^3.3.3" } }, "node_modules/@shopify/liquid-html-parser": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@shopify/liquid-html-parser/-/liquid-html-parser-2.0.5.tgz", - "integrity": "sha512-MYp/fe3jSjzmRu6HlbaG/1IJpdB39iShdvnc5biDPjlBhLr0PH/2rHXVdthlAcYDhJvd7DTd7TV0kl3erpUNGg==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@shopify/liquid-html-parser/-/liquid-html-parser-2.1.0.tgz", + "integrity": "sha512-csoReVpvWZM3nBuyFWHH625kzjxoD+XFxiDxh6F7xh4rqqF3/y1k1hYocAHgATZqBnz+7O5JdWphjvfZwptJCQ==", "license": "MIT", "dependencies": { "line-column": "^1.0.2", @@ -20,12 +20,12 @@ } }, "node_modules/@shopify/prettier-plugin-liquid": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/@shopify/prettier-plugin-liquid/-/prettier-plugin-liquid-1.5.2.tgz", - "integrity": "sha512-KjTuNFxyyVZQYAX9+TqKq3NuQhg9Or7PLbot8Q1y18fIjN3rjf6z6Cw3nzn845cW7GSdWW7awyvzaWXMw+sPqQ==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@shopify/prettier-plugin-liquid/-/prettier-plugin-liquid-1.6.0.tgz", + "integrity": "sha512-8SdulmzqrIXxGjps3T33OwR1wmmnYdsd9Fu3Cy5xPBdeKCFW1Z4VkVXEMrp5P9WkqoMCJS/RSEO+Ds1bF0S8fw==", "license": "MIT", "dependencies": { - "@shopify/liquid-html-parser": "^2.0.5", + "@shopify/liquid-html-parser": 
"^2.1.0", "html-styles": "^1.0.0" }, "peerDependencies": { From d602e0c8167694f5f4c85bf36c2da6633acb0478 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20M=C3=A4hr?= Date: Wed, 13 Nov 2024 13:06:15 +0100 Subject: [PATCH 12/12] fix: update default thumbnail path to SVG and add no-image.svg asset --- .github/workflows/process_data.py | 2 +- assets/img/no-image.png | Bin 3319 -> 0 bytes assets/img/no-image.svg | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) delete mode 100644 assets/img/no-image.png create mode 100644 assets/img/no-image.svg diff --git a/.github/workflows/process_data.py b/.github/workflows/process_data.py index 4c46989b..9a16490b 100644 --- a/.github/workflows/process_data.py +++ b/.github/workflows/process_data.py @@ -145,7 +145,7 @@ def extract_item_data(item): """Extracts relevant data from an item and downloads its thumbnail if available.""" local_image_path = ( download_thumbnail(item.get("thumbnail_display_urls", {}).get("large", "")) - or "assets/img/no-image.png" + or "assets/img/no-image.svg" ) return { diff --git a/assets/img/no-image.png b/assets/img/no-image.png deleted file mode 100644 index 274f85d0027fa2ccc57d766bbaab6b1e1c593e56..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3319 zcmb7Hd0bOh7EV|~f^3hV5DZF__>5v9i^^sVLP#2TVn`YN;evn{U7K6dq3qIzGFc?cu^ABqYEZt3|UI1`HBr0@#d>mZkg{vwp*Cv$)6C_qL4mN}!N_k;VznOj} z{O_W$Imb#Thx$O?-MsxT+<3Pa%2Ms9Hpes2!Omi&;)VQDILG2My7gyt`_tJ3H|3Fh zR*+*t&Wu)zk6b!K=5&T6VgB80^$d9?#PzC?&XDLM`M=7a2?H)%`)_+Y^Qs3m|2}Ma zF>R?`#5CN{t3Z2XE8|vKyfeHu=Fv@DwkNKVe$swnDq%%o_^Pm@E@7UuAhE5d(my@U zcd*i@SYi0*$>-+|3S5GW2oI}kr(xp7C72ra99b^{^HX6BbV!iG71v_p#Ui&Z6;jlT zOq(hDle<)8%mk%jE7{#rPW{3qsW7?wXgD>1STw4# z%j!k^dc-V(*?C1m&48zPPB^yG0zbc#UCPU$9JN$===*~zE&T&N9|+oEBeK_zdJVsv z+thGJQ!udHi5EBJ!#ZXvZn^E1Aa&9XhqeyLFBd!e$f3iU9lO&=k9_>*I((1G3rV}t zlGXY36liFES~;|+{KlejKKQ1vbE`wMQ4 zFMKC{GC!y%@TpWz<_GmA>;Ibg3+U2&{Ov@{6Pq6S+r)k!2!wUiLU1GR&G2o--yBg@ z9e-Nkjbn_Y8r6@zjzADpasf;9@L$KAD;U9|r2Ji5`X#4}*?}Q%FjX7xG27Y`}=Jj?jn@sVmDF#4!kT zjV?6hP%sOUYiLKTh|`k>oHp}I3|S=zLUU}kP;92D?C1tAm&XQPh=f>#F_PtrW>>rk z=DD{;zLdwBle!wV=Qmf82(V=sNGG?WYq(sT;%%K@31ZS*Z&@~(NiP51b|s@m{vl;1 zm}U$J4XEYQRK0qLvxn6s*O;Db0+qe`!J)QhH-u}tjCwTeEs%cCTThxm{`x@&n36J6 ztx!FqCPy3u<8$b?O27doS9Tx&3AK)?$S^Gq_L?LNMFx#01uAB+xSh)%#%yFo%H7nB ziIo&)@To5f3U&BR3y4z-pp_foxz5zOYe*E0|0VGH2a7KOJ-U{aEGMfQ<9mSRRAIqB z6MoZFaNE#IFW9?;S{H?&Kp#(`0zH`iqS{hqBaPpC9E}GS4h;SdC=6za_6u2Z`KOYS z0$CKb6hW)p)v=X8VBqpofU(GgpGBO$OQ++_VAja_$VL+csFusGV06i6gVjJFdhF8b zc(2XFUHjkm(C=61+#c1|s+Knk@tw`qHL*E$nQM%?@j;`mn87;1d@{2U&o-!TmLE@0 z2_l|fT?E@Byjk0~Q;ake2MU9<?xXXQVMlN?Tcd?u~d!j?BGxUu42dj;FDftr{*)JF{V#yO5ECZ~+RW@)SysI_tlAU0Z5_qC z$?y~W@RvQ>mpZ9CMKh7HMVGz_)1Y6T8}c(+M;fXY&#NmxSzRJX zALvW)ejEmmV5{3>I0qgs6}5coEeeN6)b*ij2xUnB{p=r8&TAwN0Tn%8G{rg#aSqAd zv8)r0hoAKK>rn7)?l95(xV)?ll{;F#yS8o}otIiEvf{UBPpPg=*|(4=fJgd|eM7>+BhDv| zkv@P&0=`O=D6j2nyb=SztF_1sZP$M`!-o4Yjd*B>^SVa3?jW+-#^evIFejyN{r zStndWEAD*E>OOv+&(=O%dir2~cYMtb>Ylr3 z`wm|bJNwc_F&n_tR=!zyVAT0}22CYtDR%0V4enBWwS#JqP&fFNAbz!V;L__csG+~L zpGGVqo6G6%N}Y(Fol*=jOADb|pI?0eV}5Lwz1>rW#a!& z@NP%`{oCYitNtGx`**_RA#jQUhc0l45^JFj3DtaH_^XEcZi{~Jy%jZa(D;K!{l~5U zPxEZlF^A+oah^Q7xjfrVT1oG<;Z#|Lspp@ro0AZ>wYPcrs8z{#&`RH}M~@D*5uUE1 zd8W9^InbD^JYTvf)HI8-1v2b{nrZgRDy(9DYzYn#WeIE<0xUeInPwAPV#z3ob=II$ z;8l$19D_6Rk64zq;sYg6T9}m%&9ofDG~~df^;v>ax}nxuF-vKusj43CTNz(&SFG+WMIL@>IzdqTN%XoPYEoyK| z94|N4xM~Nc#GQHlEV+?EP1y# zUg;;Vj8>0(dv46=Npt1gDSh*G0lqVyElbB%UVWKfy?1*g_-?@nLL<1 \ No newline at end of file